diff --git a/src/struphy/bsplines/bsplines.py b/src/struphy/bsplines/bsplines.py index 9974a9ff2..a659e4c61 100644 --- a/src/struphy/bsplines/bsplines.py +++ b/src/struphy/bsplines/bsplines.py @@ -16,7 +16,7 @@ """ -import cunumpy as xp +from struphy.utils.arrays import xp as np __all__ = [ "find_span", @@ -105,7 +105,7 @@ def scaling_vector(knots, degree, span): Scaling vector with elements (p + 1)/(t[i + p + 1] - t[i]) """ - x = xp.zeros(degree + 1, dtype=float) + x = np.zeros(degree + 1, dtype=float) for il in range(degree + 1): i = span - il @@ -148,9 +148,9 @@ def basis_funs(knots, degree, x, span, normalize=False): by using 'left' and 'right' temporary arrays that are one element shorter. """ - left = xp.empty(degree, dtype=float) - right = xp.empty(degree, dtype=float) - values = xp.empty(degree + 1, dtype=float) + left = np.empty(degree, dtype=float) + right = np.empty(degree, dtype=float) + values = np.empty(degree + 1, dtype=float) values[0] = 1.0 @@ -164,7 +164,7 @@ def basis_funs(knots, degree, x, span, normalize=False): saved = left[j - r] * temp values[j + 1] = saved - if normalize == True: + if normalize: values = values * scaling_vector(knots, degree, span) return values @@ -205,7 +205,7 @@ def basis_funs_1st_der(knots, degree, x, span): # Compute derivatives at x using formula based on difference of splines of degree deg - 1 # ------- # j = 0 - ders = xp.empty(degree + 1, dtype=float) + ders = np.empty(degree + 1, dtype=float) saved = degree * values[0] / (knots[span + 1] - knots[span + 1 - degree]) ders[0] = -saved @@ -261,11 +261,11 @@ def basis_funs_all_ders(knots, degree, x, span, n): - innermost loops are replaced with vector operations on slices. """ - left = xp.empty(degree) - right = xp.empty(degree) - ndu = xp.empty((degree + 1, degree + 1)) - a = xp.empty((2, degree + 1)) - ders = xp.zeros((n + 1, degree + 1)) # output array + left = np.empty(degree) + right = np.empty(degree) + ndu = np.empty((degree + 1, degree + 1)) + a = np.empty((2, degree + 1)) + ders = np.zeros((n + 1, degree + 1)) # output array # Number of derivatives that need to be effectively computed # Derivatives higher than degree are = 0.
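Note on the import swap above: `from struphy.utils.arrays import xp as np` replaces the hard dependency on `cunumpy` with a single backend-selection module, so every call site keeps NumPy-style spelling while the actual array backend is chosen in one place. The snippet below is a hypothetical sketch of what such a shim can look like; only the exported name `xp` is taken from this diff, and the try/except logic is an assumption for illustration:

    # hypothetical sketch of struphy/utils/arrays.py (not the actual module)
    try:
        import cunumpy as xp  # GPU-capable drop-in replacement, if installed
    except ImportError:
        import numpy as xp  # plain NumPy fallback with the same array API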
@@ -304,7 +304,7 @@ def basis_funs_all_ders(knots, degree, x, span, n): j1 = 1 if (rk > -1) else -rk j2 = k - 1 if (r - 1 <= pk) else degree - r a[s2, j1 : j2 + 1] = (a[s1, j1 : j2 + 1] - a[s1, j1 - 1 : j2]) * ndu[pk + 1, rk + j1 : rk + j2 + 1] - d += xp.dot(a[s2, j1 : j2 + 1], ndu[rk + j1 : rk + j2 + 1, pk]) + d += np.dot(a[s2, j1 : j2 + 1], ndu[rk + j1 : rk + j2 + 1, pk]) if r <= pk: a[s2, k] = -a[s1, k - 1] * ndu[pk + 1, r] d += a[s2, k] * ndu[r, pk] @@ -362,7 +362,7 @@ def collocation_matrix(knots, degree, xgrid, periodic, normalize=False): nx = len(xgrid) # Collocation matrix as 2D Numpy array (dense storage) - mat = xp.zeros((nx, nb), dtype=float) + mat = np.zeros((nx, nb), dtype=float) # Indexing of basis functions (periodic or not) for a given span if periodic: @@ -418,12 +418,12 @@ def histopolation_matrix(knots, degree, xgrid, periodic): # Number of integrals if periodic: el_b = breakpoints(knots, degree) - xgrid = xp.array([el_b[0]] + list(xgrid) + [el_b[-1]]) + xgrid = np.array([el_b[0]] + list(xgrid) + [el_b[-1]]) ni = len(xgrid) - 1 # Histopolation matrix of M-splines as 2D Numpy array (dense storage) - his = xp.zeros((ni, nbD), dtype=float) + his = np.zeros((ni, nbD), dtype=float) # Collocation matrix of B-splines col = collocation_matrix(knots, degree, xgrid, False, normalize=False) @@ -434,7 +434,7 @@ def histopolation_matrix(knots, degree, xgrid, periodic): for k in range(j + 1): his[i, j % nbD] += col[i, k] - col[i + 1, k] - if xp.abs(his[i, j % nbD]) < 1e-14: + if np.abs(his[i, j % nbD]) < 1e-14: his[i, j % nbD] = 0.0 # add first to last integration interval in case of periodic splines @@ -470,7 +470,7 @@ def breakpoints(knots, degree): else: endsl = -degree - return xp.unique(knots[slice(degree, endsl)]) + return np.unique(knots[slice(degree, endsl)]) # ============================================================================== @@ -501,13 +501,13 @@ def greville(knots, degree, periodic): n = len(T) - 2 * p - 1 if periodic else len(T) - p - 1 # Compute greville abscissas as average of p consecutive knot values - xg = xp.around([sum(T[i : i + p]) / p for i in range(s, s + n)], decimals=15) + xg = np.around([sum(T[i : i + p]) / p for i in range(s, s + n)], decimals=15) # If needed apply periodic boundary conditions if periodic: a = T[p] b = T[-p] - xg = xp.around((xg - a) % (b - a) + a, decimals=15) + xg = np.around((xg - a) % (b - a) + a, decimals=15) return xg @@ -537,7 +537,7 @@ def elements_spans(knots, degree): >>> from psydac.core.bsplines import make_knots, elements_spans >>> p = 3 ; n = 8 - >>> grid = xp.arange( n-p+1 ) + >>> grid = np.arange( n-p+1 ) >>> knots = make_knots( breaks=grid, degree=p, periodic=False ) >>> spans = elements_spans( knots=knots, degree=p ) >>> spans @@ -549,13 +549,13 @@ def elements_spans(knots, degree): 2) This function could be written in two lines: breaks = breakpoints( knots, degree ) - spans = xp.searchsorted( knots, breaks[:-1], side='right' ) - 1 + spans = np.searchsorted( knots, breaks[:-1], side='right' ) - 1 """ breaks = breakpoints(knots, degree) nk = len(knots) ne = len(breaks) - 1 - spans = xp.zeros(ne, dtype=int) + spans = np.zeros(ne, dtype=int) ie = 0 for ik in range(degree, nk - degree): @@ -600,13 +600,13 @@ def make_knots(breaks, degree, periodic): # Consistency checks assert len(breaks) > 1 - assert all(xp.diff(breaks) > 0) + assert all(np.diff(breaks) > 0) assert degree > 0 if periodic: assert len(breaks) > degree p = degree - T = xp.zeros(len(breaks) + 2 * p, dtype=float) + T = np.zeros(len(breaks) + 2 * p, 
dtype=float) T[p:-p] = breaks if periodic: @@ -671,13 +671,13 @@ def quadrature_grid(breaks, quad_rule_x, quad_rule_w): assert min(quad_rule_x) >= -1 assert max(quad_rule_x) <= +1 - quad_rule_x = xp.asarray(quad_rule_x) - quad_rule_w = xp.asarray(quad_rule_w) + quad_rule_x = np.asarray(quad_rule_x) + quad_rule_w = np.asarray(quad_rule_w) ne = len(breaks) - 1 nq = len(quad_rule_x) - quad_x = xp.zeros((ne, nq), dtype=float) - quad_w = xp.zeros((ne, nq), dtype=float) + quad_x = np.zeros((ne, nq), dtype=float) + quad_w = np.zeros((ne, nq), dtype=float) # Compute location and weight of quadrature points from basic rule for ie, (a, b) in enumerate(zip(breaks[:-1], breaks[1:])): @@ -724,7 +724,7 @@ def basis_ders_on_quad_grid(knots, degree, quad_grid, nders, normalize=False): # TODO: check if it is safe to compute span only once for each element ne, nq = quad_grid.shape - basis = xp.zeros((ne, degree + 1, nders + 1, nq), dtype=float) + basis = np.zeros((ne, degree + 1, nders + 1, nq), dtype=float) # Loop over elements for ie in range(ne): @@ -735,7 +735,7 @@ span = find_span(knots, degree, xq) ders = basis_funs_all_ders(knots, degree, xq, span, nders) - if normalize == True: + if normalize: ders = ders * scaling_vector(knots, degree, span) basis[ie, :, :, iq] = ders.transpose() diff --git a/src/struphy/bsplines/bsplines_kernels.py b/src/struphy/bsplines/bsplines_kernels.py index 9fa1a9521..17374f178 100644 --- a/src/struphy/bsplines/bsplines_kernels.py +++ b/src/struphy/bsplines/bsplines_kernels.py @@ -83,13 +83,7 @@ def find_span(t: "Final[float[:]]", p: "int", eta: "float") -> "int": @pure def basis_funs( - t: "Final[float[:]]", - p: "int", - eta: "float", - span: "int", - left: "float[:]", - right: "float[:]", - values: "float[:]", + t: "Final[float[:]]", p: "int", eta: "float", span: "int", left: "float[:]", right: "float[:]", values: "float[:]" ): """ Parameters @@ -601,13 +595,7 @@ def basis_funs_and_der( @pure @stack_array("values_b") def basis_funs_1st_der( - t: "Final[float[:]]", - p: "int", - eta: "float", - span: "int", - left: "float[:]", - right: "float[:]", - values: "float[:]", + t: "Final[float[:]]", p: "int", eta: "float", span: "int", left: "float[:]", right: "float[:]", values: "float[:]" ): """ Parameters diff --git a/src/struphy/bsplines/evaluation_kernels_1d.py b/src/struphy/bsplines/evaluation_kernels_1d.py index 6510eafff..a6ec8b7a5 100644 --- a/src/struphy/bsplines/evaluation_kernels_1d.py +++ b/src/struphy/bsplines/evaluation_kernels_1d.py @@ -61,12 +61,7 @@ def evaluation_kernel_1d(p1: int, basis1: "Final[float[:]]", ind1: "Final[int[:] @pure @stack_array("tmp1", "tmp2") def evaluate( - kind1: int, - t1: "Final[float[:]]", - p1: int, - ind1: "Final[int[:,:]]", - coeff: "Final[float[:]]", - eta1: float, + kind1: int, t1: "Final[float[:]]", p1: int, ind1: "Final[int[:,:]]", coeff: "Final[float[:]]", eta1: float ) -> float: """ Point-wise evaluation of a spline.
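The hunks above keep the psydac-style API of the pure-Python module (`make_knots(breaks, degree, periodic)`, `find_span(knots, degree, x)`, `basis_funs(knots, degree, x, span, normalize=False)`). A minimal usage sketch, assuming an open (clamped) knot vector on [0, 1]; the numbers are illustrative only:

    from struphy.bsplines.bsplines import basis_funs, find_span, make_knots

    breaks = [0.0, 0.25, 0.5, 0.75, 1.0]
    degree = 3
    knots = make_knots(breaks, degree, periodic=False)

    x = 0.3
    span = find_span(knots, degree, x)
    values = basis_funs(knots, degree, x, span)  # the degree + 1 B-splines non-zero at x
    assert abs(sum(values) - 1.0) < 1e-12        # B-splines form a partition of unity

With `normalize=True` the values are multiplied by `scaling_vector`, i.e. by (p + 1)/(t[i + p + 1] - t[i]), which yields M-splines (normalized to unit integral) instead of B-splines (partition of unity).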
diff --git a/src/struphy/bsplines/evaluation_kernels_3d.py b/src/struphy/bsplines/evaluation_kernels_3d.py index a6c900616..8ccaa252b 100644 --- a/src/struphy/bsplines/evaluation_kernels_3d.py +++ b/src/struphy/bsplines/evaluation_kernels_3d.py @@ -246,212 +246,47 @@ def evaluate_tensor_product( for i3 in range(len(eta3)): if kind == 0: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 1, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 1, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 11: spline_values[i1, i2, i3] = evaluate_3d( - 2, - 1, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 2, 1, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 12: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 2, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 2, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 13: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 1, - 2, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 1, 2, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 21: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 2, - 2, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 2, 2, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 22: spline_values[i1, i2, i3] = evaluate_3d( - 2, - 1, - 2, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 2, 1, 2, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 23: spline_values[i1, i2, i3] = evaluate_3d( - 2, - 2, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 2, 2, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 3: spline_values[i1, i2, i3] = evaluate_3d( - 2, - 2, - 2, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 2, 2, 2, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 41: spline_values[i1, i2, i3] = evaluate_3d( - 3, - 1, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 3, 1, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 42: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 3, - 1, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 3, 1, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 43: spline_values[i1, i2, i3] = evaluate_3d( - 1, - 1, - 3, - t1, - t2, - t3, - p1, - p2, - p3, - ind1, - ind2, - ind3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + 1, 1, 3, t1, t2, t3, p1, p2, p3, ind1, ind2, ind3, coeff, eta1[i1], eta2[i2], eta3[i3] ) @@ -1216,17 +1051,7 @@ def eval_spline_mpi( b3 = bd3 value = eval_spline_mpi_kernel( - pn[0] - kind[0], - pn[1] - kind[1], - pn[2] - kind[2], - b1, - b2, - b3, - span1, - span2, - span3, - _data, - starts, + pn[0] - kind[0], pn[1] - kind[1], pn[2] - kind[2], b1, b2, b3, 
span1, span2, span3, _data, starts ) return value @@ -1371,17 +1196,7 @@ def eval_spline_mpi_tensor_product_fast( b3 = bd3 values[i, j, k] = eval_spline_mpi_kernel( - pn[0] - kind[0], - pn[1] - kind[1], - pn[2] - kind[2], - b1, - b2, - b3, - span1, - span2, - span3, - _data, - starts, + pn[0] - kind[0], pn[1] - kind[1], pn[2] - kind[2], b1, b2, b3, span1, span2, span3, _data, starts ) @@ -1447,17 +1262,7 @@ def eval_spline_mpi_tensor_product_fixed( b3[:] = b3s[k, :] values[i, j, k] = eval_spline_mpi_kernel( - pn[0] - kind[0], - pn[1] - kind[1], - pn[2] - kind[2], - b1, - b2, - b3, - span1, - span2, - span3, - _data, - starts, + pn[0] - kind[0], pn[1] - kind[1], pn[2] - kind[2], b1, b2, b3, span1, span2, span3, _data, starts ) @@ -1515,16 +1320,7 @@ def eval_spline_mpi_matrix( continue # point not in process domain values[i, j, k] = eval_spline_mpi( - eta1[i, j, k], - eta2[i, j, k], - eta3[i, j, k], - _data, - kind, - pn, - tn1, - tn2, - tn3, - starts, + eta1[i, j, k], eta2[i, j, k], eta3[i, j, k], _data, kind, pn, tn1, tn2, tn3, starts ) @@ -1586,16 +1382,7 @@ def eval_spline_mpi_sparse_meshgrid( continue # point not in process domain values[i, j, k] = eval_spline_mpi( - eta1[i, 0, 0], - eta2[0, j, 0], - eta3[0, 0, k], - _data, - kind, - pn, - tn1, - tn2, - tn3, - starts, + eta1[i, 0, 0], eta2[0, j, 0], eta3[0, 0, k], _data, kind, pn, tn1, tn2, tn3, starts ) @@ -1645,16 +1432,7 @@ def eval_spline_mpi_markers( continue # point not in process domain values[ip] = eval_spline_mpi( - markers[ip, 0], - markers[ip, 1], - markers[ip, 2], - _data, - kind, - pn, - tn1, - tn2, - tn3, - starts, + markers[ip, 0], markers[ip, 1], markers[ip, 2], _data, kind, pn, tn1, tn2, tn3, starts ) @@ -1675,39 +1453,20 @@ def get_spans(eta1: float, eta2: float, eta3: float, args_derham: "DerhamArgumen # get spline values at eta bsplines_kernels.b_d_splines_slim( - args_derham.tn1, - args_derham.pn[0], - eta1, - int(span1), - args_derham.bn1, - args_derham.bd1, + args_derham.tn1, args_derham.pn[0], eta1, int(span1), args_derham.bn1, args_derham.bd1 ) bsplines_kernels.b_d_splines_slim( - args_derham.tn2, - args_derham.pn[1], - eta2, - int(span2), - args_derham.bn2, - args_derham.bd2, + args_derham.tn2, args_derham.pn[1], eta2, int(span2), args_derham.bn2, args_derham.bd2 ) bsplines_kernels.b_d_splines_slim( - args_derham.tn3, - args_derham.pn[2], - eta3, - int(span3), - args_derham.bn3, - args_derham.bd3, + args_derham.tn3, args_derham.pn[2], eta3, int(span3), args_derham.bn3, args_derham.bd3 ) return span1, span2, span3 def eval_0form_spline_mpi( - span1: int, - span2: int, - span3: int, - args_derham: "DerhamArguments", - form_coeffs: "float[:,:,:]", + span1: int, span2: int, span3: int, args_derham: "DerhamArguments", form_coeffs: "float[:,:,:]" ) -> float: """Single-point evaluation of Derham 0-form spline defined by form_coeffs, given N-spline values (in bn) and knot span indices span.""" @@ -1843,11 +1602,7 @@ def eval_2form_spline_mpi( def eval_3form_spline_mpi( - span1: int, - span2: int, - span3: int, - args_derham: "DerhamArguments", - form_coeffs: "float[:,:,:]", + span1: int, span2: int, span3: int, args_derham: "DerhamArguments", form_coeffs: "float[:,:,:]" ) -> float: """Single-point evaluation of Derham 3-form spline defined by form_coeffs, given D-spline values (in bd) and knot span indices span.""" diff --git a/src/struphy/bsplines/tests/test_bsplines_kernels.py b/src/struphy/bsplines/tests/test_bsplines_kernels.py index c1010dd08..1a16712c1 100644 ---
a/src/struphy/bsplines/tests/test_bsplines_kernels.py +++ b/src/struphy/bsplines/tests/test_bsplines_kernels.py @@ -1,9 +1,10 @@ import time -import cunumpy as xp import pytest from psydac.ddm.mpi import mpi as MPI +from struphy.utils.arrays import xp as np + @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[1, 2, 1], [2, 1, 2], [3, 4, 3]]) @@ -33,9 +34,9 @@ def test_bsplines_span_and_basis(Nel, p, spl_kind): # Random points in domain of process n_pts = 100 dom = derham.domain_array[rank] - eta1s = xp.random.rand(n_pts) * (dom[1] - dom[0]) + dom[0] - eta2s = xp.random.rand(n_pts) * (dom[4] - dom[3]) + dom[3] - eta3s = xp.random.rand(n_pts) * (dom[7] - dom[6]) + dom[6] + eta1s = np.random.rand(n_pts) * (dom[1] - dom[0]) + dom[0] + eta2s = np.random.rand(n_pts) * (dom[4] - dom[3]) + dom[3] + eta3s = np.random.rand(n_pts) * (dom[7] - dom[6]) + dom[6] # struphy find_span t0 = time.time() @@ -59,18 +60,18 @@ def test_bsplines_span_and_basis(Nel, p, spl_kind): if rank == 0: print(f"psydac find_span_p : {t1 - t0}") - assert xp.allclose(span1s, span1s_psy) - assert xp.allclose(span2s, span2s_psy) - assert xp.allclose(span3s, span3s_psy) + assert np.allclose(span1s, span1s_psy) + assert np.allclose(span2s, span2s_psy) + assert np.allclose(span3s, span3s_psy) # allocate tmps - bn1 = xp.empty(derham.p[0] + 1, dtype=float) - bn2 = xp.empty(derham.p[1] + 1, dtype=float) - bn3 = xp.empty(derham.p[2] + 1, dtype=float) + bn1 = np.empty(derham.p[0] + 1, dtype=float) + bn2 = np.empty(derham.p[1] + 1, dtype=float) + bn3 = np.empty(derham.p[2] + 1, dtype=float) - bd1 = xp.empty(derham.p[0], dtype=float) - bd2 = xp.empty(derham.p[1], dtype=float) - bd3 = xp.empty(derham.p[2], dtype=float) + bd1 = np.empty(derham.p[0], dtype=float) + bd2 = np.empty(derham.p[1], dtype=float) + bd3 = np.empty(derham.p[2], dtype=float) # struphy b_splines_slim val1s, val2s, val3s = [], [], [] @@ -102,13 +103,13 @@ def test_bsplines_span_and_basis(Nel, p, spl_kind): # compare for val1, val1_psy in zip(val1s, val1s_psy): - assert xp.allclose(val1, val1_psy) + assert np.allclose(val1, val1_psy) for val2, val2_psy in zip(val2s, val2s_psy): - assert xp.allclose(val2, val2_psy) + assert np.allclose(val2, val2_psy) for val3, val3_psy in zip(val3s, val3s_psy): - assert xp.allclose(val3, val3_psy) + assert np.allclose(val3, val3_psy) # struphy b_d_splines_slim val1s_n, val2s_n, val3s_n = [], [], [] @@ -130,13 +131,13 @@ def test_bsplines_span_and_basis(Nel, p, spl_kind): # compare for val1, val1_psy in zip(val1s_n, val1s_psy): - assert xp.allclose(val1, val1_psy) + assert np.allclose(val1, val1_psy) for val2, val2_psy in zip(val2s_n, val2s_psy): - assert xp.allclose(val2, val2_psy) + assert np.allclose(val2, val2_psy) for val3, val3_psy in zip(val3s_n, val3s_psy): - assert xp.allclose(val3, val3_psy) + assert np.allclose(val3, val3_psy) # struphy d_splines_slim span1s, span2s, span3s = [], [], [] @@ -174,22 +175,22 @@ def test_bsplines_span_and_basis(Nel, p, spl_kind): # compare for val1, val1_psy in zip(val1s, val1s_psy): - assert xp.allclose(val1, val1_psy) + assert np.allclose(val1, val1_psy) for val2, val2_psy in zip(val2s, val2s_psy): - assert xp.allclose(val2, val2_psy) + assert np.allclose(val2, val2_psy) for val3, val3_psy in zip(val3s, val3s_psy): - assert xp.allclose(val3, val3_psy) + assert np.allclose(val3, val3_psy) for val1, val1_psy in zip(val1s_d, val1s_psy): - assert xp.allclose(val1, val1_psy) + assert np.allclose(val1, val1_psy) for val2, val2_psy in zip(val2s_d, val2s_psy): - assert 
xp.allclose(val2, val2_psy) + assert np.allclose(val2, val2_psy) for val3, val3_psy in zip(val3s_d, val3s_psy): - assert xp.allclose(val3, val3_psy) + assert np.allclose(val3, val3_psy) if __name__ == "__main__": diff --git a/src/struphy/bsplines/tests/test_eval_spline_mpi.py b/src/struphy/bsplines/tests/test_eval_spline_mpi.py index 923fc8ea6..e40af4124 100644 --- a/src/struphy/bsplines/tests/test_eval_spline_mpi.py +++ b/src/struphy/bsplines/tests/test_eval_spline_mpi.py @@ -1,10 +1,11 @@ from sys import int_info from time import sleep -import cunumpy as xp import pytest from psydac.ddm.mpi import mpi as MPI +from struphy.utils.arrays import xp as np + @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[1, 2, 3], [3, 1, 2]]) @@ -37,9 +38,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): # Random points in domain of process dom = derham.domain_array[rank] - eta1s = xp.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] - eta2s = xp.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] - eta3s = xp.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] + eta1s = np.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] + eta2s = np.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] + eta3s = np.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] for eta1, eta2, eta3 in zip(eta1s, eta2s, eta3s): comm.Barrier() @@ -55,13 +56,13 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span3 = bsp.find_span(tn3, derham.p[2], eta3) # non-zero spline values at eta - bn1 = xp.empty(derham.p[0] + 1, dtype=float) - bn2 = xp.empty(derham.p[1] + 1, dtype=float) - bn3 = xp.empty(derham.p[2] + 1, dtype=float) + bn1 = np.empty(derham.p[0] + 1, dtype=float) + bn2 = np.empty(derham.p[1] + 1, dtype=float) + bn3 = np.empty(derham.p[2] + 1, dtype=float) - bd1 = xp.empty(derham.p[0], dtype=float) - bd2 = xp.empty(derham.p[1], dtype=float) - bd3 = xp.empty(derham.p[2], dtype=float) + bd1 = np.empty(derham.p[0], dtype=float) + bd2 = np.empty(derham.p[1], dtype=float) + bd3 = np.empty(derham.p[2], dtype=float) bsp.b_d_splines_slim(tn1, derham.p[0], eta1, span1, bn1, bd1) bsp.b_d_splines_slim(tn2, derham.p[1], eta2, span2, bn2, bd2) @@ -82,8 +83,8 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): # compare spline evaluation routines in V0 val = eval3d(*derham.p, bn1, bn2, bn3, ind_n1, ind_n2, ind_n3, x0[0]) - val_mpi = eval3d_mpi(*derham.p, bn1, bn2, bn3, span1, span2, span3, x0_psy._data, xp.array(x0_psy.starts)) - assert xp.allclose(val, val_mpi) + val_mpi = eval3d_mpi(*derham.p, bn1, bn2, bn3, span1, span2, span3, x0_psy._data, np.array(x0_psy.starts)) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V1 val = eval3d(derham.p[0] - 1, derham.p[1], derham.p[2], bd1, bn2, bn3, ind_d1, ind_n2, ind_n3, x1[0]) @@ -98,9 +99,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x1_psy[0]._data, - xp.array(x1_psy[0].starts), + np.array(x1_psy[0].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) val = eval3d(derham.p[0], derham.p[1] - 1, derham.p[2], bn1, bd2, bn3, ind_n1, ind_d2, ind_n3, x1[1]) val_mpi = eval3d_mpi( @@ -114,9 +115,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x1_psy[1]._data, - xp.array(x1_psy[1].starts), + np.array(x1_psy[1].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) val = eval3d(derham.p[0], derham.p[1], derham.p[2] - 1, bn1, bn2, bd3, ind_n1, ind_n2, ind_d3, x1[2]) val_mpi = eval3d_mpi( @@ -130,9 +131,9 @@ def 
test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x1_psy[2]._data, - xp.array(x1_psy[2].starts), + np.array(x1_psy[2].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V2 val = eval3d(derham.p[0], derham.p[1] - 1, derham.p[2] - 1, bn1, bd2, bd3, ind_n1, ind_d2, ind_d3, x2[0]) @@ -147,9 +148,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x2_psy[0]._data, - xp.array(x2_psy[0].starts), + np.array(x2_psy[0].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) val = eval3d(derham.p[0] - 1, derham.p[1], derham.p[2] - 1, bd1, bn2, bd3, ind_d1, ind_n2, ind_d3, x2[1]) val_mpi = eval3d_mpi( @@ -163,9 +164,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x2_psy[1]._data, - xp.array(x2_psy[1].starts), + np.array(x2_psy[1].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) val = eval3d(derham.p[0] - 1, derham.p[1] - 1, derham.p[2], bd1, bd2, bn3, ind_d1, ind_d2, ind_n3, x2[2]) val_mpi = eval3d_mpi( @@ -179,9 +180,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x2_psy[2]._data, - xp.array(x2_psy[2].starts), + np.array(x2_psy[2].starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V3 val = eval3d(derham.p[0] - 1, derham.p[1] - 1, derham.p[2] - 1, bd1, bd2, bd3, ind_d1, ind_d2, ind_d3, x3[0]) @@ -196,9 +197,9 @@ def test_eval_kernels(Nel, p, spl_kind, n_markers=10): span2, span3, x3_psy._data, - xp.array(x3_psy.starts), + np.array(x3_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @@ -229,9 +230,9 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): # Random points in domain of process dom = derham.domain_array[rank] - eta1s = xp.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] - eta2s = xp.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] - eta3s = xp.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] + eta1s = np.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] + eta2s = np.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] + eta3s = np.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] for eta1, eta2, eta3 in zip(eta1s, eta2s, eta3s): comm.Barrier() @@ -250,14 +251,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x0_psy._data, derham.spline_types_pyccel["0"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V1 # 1st component @@ -286,14 +287,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x1_psy[0]._data, derham.spline_types_pyccel["1"][0], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # 2nd component val = evaluate_3d( @@ -321,14 +322,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x1_psy[1]._data, derham.spline_types_pyccel["1"][1], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # 3rd component val = evaluate_3d( @@ -356,14 +357,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, 
x1_psy[2]._data, derham.spline_types_pyccel["1"][2], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V2 # 1st component @@ -392,14 +393,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x2_psy[0]._data, derham.spline_types_pyccel["2"][0], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # 2nd component val = evaluate_3d( @@ -427,14 +428,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x2_psy[1]._data, derham.spline_types_pyccel["2"][1], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # 3rd component val = evaluate_3d( @@ -462,14 +463,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x2_psy[2]._data, derham.spline_types_pyccel["2"][2], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) # compare spline evaluation routines in V3 val = evaluate_3d( @@ -495,14 +496,14 @@ def test_eval_pointwise(Nel, p, spl_kind, n_markers=10): eta3, x3_psy._data, derham.spline_types_pyccel["3"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), ) - assert xp.allclose(val, val_mpi) + assert np.allclose(val, val_mpi) @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @@ -543,13 +544,13 @@ def test_eval_tensor_product(Nel, p, spl_kind, n_markers=10): # Random points in domain of process dom = derham.domain_array[rank] - eta1s = xp.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] - eta2s = xp.random.rand(n_markers + 1) * (dom[4] - dom[3]) + dom[3] - eta3s = xp.random.rand(n_markers + 2) * (dom[7] - dom[6]) + dom[6] + eta1s = np.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] + eta2s = np.random.rand(n_markers + 1) * (dom[4] - dom[3]) + dom[3] + eta3s = np.random.rand(n_markers + 2) * (dom[7] - dom[6]) + dom[6] - vals = xp.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) - vals_mpi = xp.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) - vals_mpi_fast = xp.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) + vals = np.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) + vals_mpi = np.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) + vals_mpi_fast = np.zeros((n_markers, n_markers + 1, n_markers + 2), dtype=float) comm.Barrier() sleep(0.02 * (rank + 1)) @@ -572,11 +573,11 @@ def test_eval_tensor_product(Nel, p, spl_kind, n_markers=10): eta3s, x0_psy._data, derham.spline_types_pyccel["0"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), vals_mpi, ) t1 = time.time() @@ -590,19 +591,19 @@ def test_eval_tensor_product(Nel, p, spl_kind, n_markers=10): eta3s, x0_psy._data, derham.spline_types_pyccel["0"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), vals_mpi_fast, ) t1 = time.time() if rank == 0: print("v0 eval_spline_mpi_tensor_product_fast:".ljust(40), t1 - t0) - assert xp.allclose(vals, vals_mpi) - assert xp.allclose(vals, 
vals_mpi_fast) + assert np.allclose(vals, vals_mpi) + assert np.allclose(vals, vals_mpi_fast) # compare spline evaluation routines in V3 t0 = time.time() @@ -632,11 +633,11 @@ def test_eval_tensor_product(Nel, p, spl_kind, n_markers=10): eta3s, x3_psy._data, derham.spline_types_pyccel["3"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), vals_mpi, ) t1 = time.time() @@ -650,19 +651,19 @@ def test_eval_tensor_product(Nel, p, spl_kind, n_markers=10): eta3s, x3_psy._data, derham.spline_types_pyccel["3"], - xp.array(derham.p), + np.array(derham.p), tn1, tn2, tn3, - xp.array(x0_psy.starts), + np.array(x0_psy.starts), vals_mpi_fast, ) t1 = time.time() if rank == 0: print("v3 eval_spline_mpi_tensor_product_fast:".ljust(40), t1 - t0) - assert xp.allclose(vals, vals_mpi) - assert xp.allclose(vals, vals_mpi_fast) + assert np.allclose(vals, vals_mpi) + assert np.allclose(vals, vals_mpi_fast) @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @@ -700,10 +701,7 @@ def test_eval_tensor_product_grid(Nel, p, spl_kind, n_markers=10): # Histopolation grids spaces = derham.Vh_fem["3"].spaces ptsG, wtsG, spans, bases, subs = prepare_projection_of_basis( - spaces, - spaces, - derham.Vh["3"].starts, - derham.Vh["3"].ends, + spaces, spaces, derham.Vh["3"].starts, derham.Vh["3"].ends ) eta1s = ptsG[0].flatten() eta2s = ptsG[1].flatten() @@ -712,15 +710,15 @@ def test_eval_tensor_product_grid(Nel, p, spl_kind, n_markers=10): spans_f, bns_f, bds_f = derham.prepare_eval_tp_fixed([eta1s, eta2s, eta3s]) # output arrays - vals = xp.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) - vals_mpi_fixed = xp.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) - vals_mpi_grid = xp.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) + vals = np.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) + vals_mpi_fixed = np.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) + vals_mpi_grid = np.zeros((eta1s.size, eta2s.size, eta3s.size), dtype=float) comm.Barrier() sleep(0.02 * (rank + 1)) - print(f"rank {rank} | {eta1s =}") - print(f"rank {rank} | {eta2s =}") - print(f"rank {rank} | {eta3s =}\n") + print(f"rank {rank} | {eta1s = }") + print(f"rank {rank} | {eta2s = }") + print(f"rank {rank} | {eta3s = }\n") comm.Barrier() # compare spline evaluation routines @@ -750,20 +748,20 @@ def test_eval_tensor_product_grid(Nel, p, spl_kind, n_markers=10): *bds_f, x3_psy._data, derham.spline_types_pyccel["3"], - xp.array(derham.p), - xp.array(x0_psy.starts), + np.array(derham.p), + np.array(x0_psy.starts), vals_mpi_fixed, ) t1 = time.time() if rank == 0: print("v3 eval_spline_mpi_tensor_product_fixed:".ljust(40), t1 - t0) - assert xp.allclose(vals, vals_mpi_fixed) + assert np.allclose(vals, vals_mpi_fixed) field = derham.create_spline_function("test", "L2") field.vector = x3_psy - assert xp.allclose(field.vector._data, x3_psy._data) + assert np.allclose(field.vector._data, x3_psy._data) t0 = time.time() field.eval_tp_fixed_loc(spans_f, bds_f, out=vals_mpi_fixed) @@ -771,7 +769,7 @@ def test_eval_tensor_product_grid(Nel, p, spl_kind, n_markers=10): if rank == 0: print("v3 field.eval_tp_fixed:".ljust(40), t1 - t0) - assert xp.allclose(vals, vals_mpi_fixed) + assert np.allclose(vals, vals_mpi_fixed) if __name__ == "__main__": diff --git a/src/struphy/compile_struphy.mk b/src/struphy/compile_struphy.mk index 2aaec6cc6..62b310eab 100644 --- a/src/struphy/compile_struphy.mk +++ b/src/struphy/compile_struphy.mk @@ -5,7 +5,7 @@ PYTHON := python3 SO_EXT := $(shell 
$(PYTHON) -c "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))") LIBDIR := $(shell $(PYTHON) -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))") -struphy_path := $(shell $(PYTHON) -c "import struphy; print(struphy.__path__[0])") +struphy_path := $(shell $(PYTHON) -c "import struphy as _; print(_.__path__[0])") # Arguments to this script are: STRUPHY_SOURCES := $(sources) diff --git a/src/struphy/conftest.py b/src/struphy/conftest.py index 05e10b55e..5a936455c 100644 --- a/src/struphy/conftest.py +++ b/src/struphy/conftest.py @@ -1,14 +1,18 @@ def pytest_addoption(parser): + parser.addoption("--fast", action="store_true") parser.addoption("--with-desc", action="store_true") parser.addoption("--vrbose", action="store_true") + parser.addoption("--verification", action="store_true") parser.addoption("--show-plots", action="store_true") parser.addoption("--nclones", type=int, default=1) - parser.addoption("--model-name", type=str, default="Maxwell") def pytest_generate_tests(metafunc): # This is called for every test. Only get/set command line arguments - # if the argument is specified in the list of test "fixturenames".]) + # if the argument is specified in the list of test "fixturenames". + option_value = metafunc.config.option.fast + if "fast" in metafunc.fixturenames and option_value is not None: + metafunc.parametrize("fast", [option_value]) option_value = metafunc.config.option.with_desc if "with_desc" in metafunc.fixturenames and option_value is not None: @@ -18,6 +22,10 @@ def pytest_generate_tests(metafunc): if "vrbose" in metafunc.fixturenames and option_value is not None: metafunc.parametrize("vrbose", [option_value]) + option_value = metafunc.config.option.verification + if "verification" in metafunc.fixturenames and option_value is not None: + metafunc.parametrize("verification", [option_value]) + option_value = metafunc.config.option.nclones if "nclones" in metafunc.fixturenames and option_value is not None: metafunc.parametrize("nclones", [option_value]) @@ -25,7 +33,3 @@ def pytest_generate_tests(metafunc): option_value = metafunc.config.option.show_plots if "show_plots" in metafunc.fixturenames and option_value is not None: metafunc.parametrize("show_plots", [option_value]) - - option_value = metafunc.config.option.model_name - if "model_name" in metafunc.fixturenames and option_value is not None: - metafunc.parametrize("model_name", [option_value]) diff --git a/src/struphy/console/compile.py b/src/struphy/console/compile.py index d92f31285..0cb628b18 100644 --- a/src/struphy/console/compile.py +++ b/src/struphy/console/compile.py @@ -4,17 +4,7 @@ def struphy_compile( - language, - compiler, - compiler_config, - omp_pic, - omp_feec, - delete, - status, - verbose, - dependencies, - time_execution, - yes, + language, compiler, compiler_config, omp_pic, omp_feec, delete, status, verbose, dependencies, time_execution, yes ): """Compile Struphy kernels. All files that contain "kernels" are detected automatically and saved to state.yml. @@ -197,9 +187,9 @@ def struphy_compile( deps = depmod.get_dependencies(ker.replace(".py", so_suffix)) deps_li = deps.split(" ") print("-" * 28) - print(f"{ker =}") + print(f"{ker = }") for dep in deps_li: - print(f"{dep =}") + print(f"{dep = }") else: # struphy and psydac (change dir not to be in source path) @@ -268,11 +258,11 @@ def struphy_compile( # only install (from .whl) if psydac not up-to-date if psydac_ver < struphy_ver: print( - f"You have psydac version {psydac_ver}, but version {struphy_ver} is available. 
Please re-install struphy (e.g. pip install .)\n", + f"You have psydac version {psydac_ver}, but version {struphy_ver} is available. Please re-install struphy (e.g. pip install .)\n" ) sys.exit(1) else: - print(f"Psydac is not installed. To install it, please re-install struphy (e.g. pip install .)\n") + print("Psydac is not installed. To install it, please re-install struphy (e.g. pip install .)\n") sys.exit(1) else: diff --git a/src/struphy/console/format.py b/src/struphy/console/format.py index 7ba6795c4..747e2d0c1 100644 --- a/src/struphy/console/format.py +++ b/src/struphy/console/format.py @@ -409,7 +409,7 @@ def parse_path(directory): for filename in files: if re.search(r"__\w+__", root): continue - if (filename.endswith(".py") or filename.endswith(".ipynb")) and not re.search(r"__\w+__", filename): + if filename.endswith(".py") and not re.search(r"__\w+__", filename): file_path = os.path.join(root, filename) python_files.append(file_path) # exit() @@ -484,9 +484,7 @@ def get_python_files(input_type, path=None): # python_files = [f for f in files if f.endswith(".py") and os.path.isfile(f)] python_files = [ - os.path.join(repopath, f) - for f in files - if (f.endswith(".py") or f.endswith(".ipynb")) and os.path.isfile(os.path.join(repopath, f)) + os.path.join(repopath, f) for f in files if f.endswith(".py") and os.path.isfile(os.path.join(repopath, f)) ] if not python_files: @@ -578,7 +576,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): "", "", "Code Analysis Report", - ], + ] ) # Include external CSS and JS libraries @@ -588,7 +586,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): "", "", "", - ], + ], ) # Custom CSS for light mode and code prettification @@ -702,7 +700,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): text-align: center; color: #999999; } -""", +""" ) html_content.append("") @@ -717,7 +715,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): }); }); -""", +""" ) html_content.extend(["", "", "

Code Issues Report

"]) @@ -731,7 +729,7 @@ def parse_json_file_to_html(json_file_path, html_output_path):

Total Issues: {total_issues}

Number of files: {total_files}

-""", +""" ) # Navigation menu @@ -755,7 +753,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): f"""
File: {display_name} -""", +""" ) issue_data = {} @@ -805,7 +803,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): f"{code} - " f"{message}
" f"Location: " - f"{display_name}:{row}:{column}
", + f"{display_name}:{row}:{column}
" ) html_content.append("

") @@ -848,7 +846,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): html_content.append( # f"
" # f"{line_number}{line_content}
" - f"{line_number}: {line_content}", + f"{line_number}: {line_content}" ) html_content.append("") # Include fix details if available @@ -856,12 +854,12 @@ def parse_json_file_to_html(json_file_path, html_output_path): html_content.append("
") html_content.append( f"

Fix Available ({fix.get('applicability', 'Unknown')}): " - f"ruff check --select ALL --fix {display_name}

", + f"ruff check --select ALL --fix {display_name}

" ) html_content.append("
") else: html_content.append( - f"

Cannot read file {filename} or invalid row {row}.

", + f"

Cannot read file {filename} or invalid row {row}.

" ) html_content.append("") @@ -875,7 +873,7 @@ def parse_json_file_to_html(json_file_path, html_output_path): -""", +""" ) html_content.extend(["", ""]) diff --git a/src/struphy/console/main.py b/src/struphy/console/main.py index 545e4a24c..1e2301555 100644 --- a/src/struphy/console/main.py +++ b/src/struphy/console/main.py @@ -16,7 +16,7 @@ # struphy path import struphy -from struphy.utils import utils +import struphy.utils.utils as utils libpath = struphy.__path__[0] __version__ = importlib.metadata.version("struphy") @@ -61,18 +61,17 @@ def struphy(): batch_files = get_batch_files(b_path) # Load the models and messages - model_message = "All models are listed on https://struphy.pages.mpcdf.de/struphy/sections/models.html" list_models = [] - ml_path = os.path.join(libpath, "models", "models_list") - if not os.path.isfile(ml_path): - utils.refresh_models() - - with open(ml_path, "rb") as fp: - list_models = pickle.load(fp) - with open(os.path.join(libpath, "models", "models_message"), "rb") as fp: - model_message, fluid_message, kinetic_message, hybrid_message, toy_message = pickle.load( - fp, - ) + model_message = fluid_message = kinetic_message = hybrid_message = toy_message = "" + try: + with open(os.path.join(libpath, "models", "models_list"), "rb") as fp: + list_models = pickle.load(fp) + with open(os.path.join(libpath, "models", "models_message"), "rb") as fp: + model_message, fluid_message, kinetic_message, hybrid_message, toy_message = pickle.load( + fp, + ) + except: + print("run: struphy --refresh-models") # 0. basic options add_parser_basic_options(parser, i_path, o_path, b_path) @@ -228,7 +227,7 @@ def struphy(): def get_params_files(i_path): if os.path.exists(i_path) and os.path.isdir(i_path): - params_files = recursive_get_files(i_path, contains=(".yml", ".yaml", ".py")) + params_files = recursive_get_files(i_path) else: print("Path to input files missing! 
Set it with `struphy --set-i PATH`") params_files = [] @@ -452,7 +451,7 @@ def add_parser_run(subparsers, list_models, model_message, params_files, batch_f default=None, # fallback if nothing is passed choices=list_models, metavar="MODEL", - help=model_message + " (default: None)", + help=model_message + f" (default: None)", ) parser_run.add_argument( @@ -684,14 +683,14 @@ def add_parser_params(subparsers, list_models, model_message): "params", formatter_class=lambda prog: argparse.RawTextHelpFormatter( prog, - max_help_position=35, + max_help_position=30, ), help="create default parameter file for a model, or show model's options", - description="Create default parameter file (.py) for a specific model.", + description="Creates a default parameter file for a specific model, or shows a model's options.", ) parser_params.add_argument( - "model_name", + "model", type=str, choices=list_models, metavar="MODEL", @@ -699,11 +698,18 @@ def add_parser_params(subparsers, list_models, model_message): ) parser_params.add_argument( - "-p", - "--params-path", + "-f", + "--file", type=str, - metavar="PATH", - help="Absolute path to the parameter file (default is getcwd()/params_MODEL.py)", + metavar="FILE", + help="name of the parameter file (.yml) to be created in the current I/O path (default=params_.yml)", + ) + + parser_params.add_argument( + "-o", + "--options", + help="show model options", + action="store_true", ) parser_params.add_argument( @@ -716,7 +722,7 @@ def add_parser_params(subparsers, list_models, model_message): parser_params.add_argument( "-y", "--yes", - help="Say yes on prompt to overwrite PATH", + help="Say yes on prompt to overwrite .yml FILE", action="store_true", ) @@ -934,19 +940,11 @@ def add_parser_test(subparsers, list_models): parser_test.add_argument( "group", type=str, - choices=list_models - + ["models"] - + ["unit"] - + ["fluid"] - + ["kinetic"] - + ["hybrid"] - + ["toy"] - + ["verification"], + choices=list_models + ["models"] + ["unit"] + ["fluid"] + ["kinetic"] + ["hybrid"] + ["toy"], metavar="GROUP", help='can be either:\na) a model name \ \nb) "models" for testing of all models (or "fluid", "kinetic", "hybrid", "toy" for testing just a sub-group) \ - \nc) "verification" for running all verification tests \ - \nd) "unit" for performing unit tests', + \nc) "unit" for performing unit tests', ) parser_test.add_argument( @@ -957,12 +955,27 @@ def add_parser_test(subparsers, list_models): default=1, ) + parser_test.add_argument( + "-f", + "--fast", + help="test model(s) just in slab geometry (Cuboid)", + action="store_true", + ) + parser_test.add_argument( "--with-desc", help="include DESC equilibrium in tests (mem consuming)", action="store_true", ) + parser_test.add_argument( + "-T", + "--Tend", + type=float, + help="if GROUP=a), simulation end time in units of the model (default=0.015 with dt=0.005), data is only saved at TEND if set", + default=None, + ) + parser_test.add_argument( "-v", "--vrbose", @@ -970,6 +983,12 @@ def add_parser_test(subparsers, list_models): action="store_true", ) + parser_test.add_argument( + "--verification", + help="perform verification runs specified in io/inp/verification/", + action="store_true", + ) + parser_test.add_argument( "--nclones", type=int, diff --git a/src/struphy/console/params.py b/src/struphy/console/params.py index 4916cd1c0..555e50c76 100644 --- a/src/struphy/console/params.py +++ b/src/struphy/console/params.py @@ -3,34 +3,35 @@ import yaml from psydac.ddm.mpi import mpi as MPI -from struphy.models import fluid, hybrid, 
kinetic, toy -from struphy.models.base import StruphyModel - -def struphy_params(model_name: str, params_path: str, yes: bool = False, check_file: bool = False): +def struphy_params(model, file, yes=False, options=False, check_file=None): """Create a model's default parameter file and save in current input path. Parameters ---------- - model_name : str + model : str The name of the Struphy model. - params_path : str - An alternative file name to the default params_<model>.yml. - yes : bool If true, say yes on prompt to overwrite .yml FILE + + file : str + An alternative file name to the default params_<model>.yml. + + options : bool + Whether to print to screen all possible options for the model. """ + + from struphy.models import fluid, hybrid, kinetic, toy + + # load model class objs = [fluid, kinetic, hybrid, toy] for obj in objs: try: - model_class = getattr(obj, model_name) - model: StruphyModel = model_class() + model_class = getattr(obj, model) except AttributeError: pass - print(f"{model_name =}") - # print units if check_file: print(f"Checking {check_file} with model {model_class}") @@ -45,8 +46,8 @@ print(f"Failed to initialize model: {e}") sys.exit(1) + elif options: + model_class.show_options() else: prompt = not yes - model.generate_default_parameter_file(path=params_path, prompt=prompt) - # print(f"Generating default parameter file for {model_class}.") - # model_class().generate_default_parameter_file(path=params_path, prompt=prompt) + model_class.generate_default_parameter_file(file=file, prompt=prompt) diff --git a/src/struphy/console/profile.py b/src/struphy/console/profile.py index a2f10ce66..aa6a36d56 100644 --- a/src/struphy/console/profile.py +++ b/src/struphy/console/profile.py @@ -6,12 +6,12 @@ def struphy_profile(dirs, replace, all, n_lines, print_callers, savefig): import os import pickle - import cunumpy as xp import yaml from matplotlib import pyplot as plt import struphy.utils.utils as utils from struphy.post_processing.cprofile_analyser import get_cprofile_data, replace_keys + from struphy.utils.arrays import xp as np # Read struphy state file state = utils.read_state() @@ -106,7 +106,7 @@ def struphy_profile(dirs, replace, all, n_lines, print_callers, savefig): + "ncalls".ljust(15) + "tottime".ljust(15) + "percall".ljust(15) - + "cumtime".ljust(15), + + "cumtime".ljust(15) ) print("-" * 154) for position, key in enumerate(dicts[0].keys()): @@ -167,10 +167,10 @@ def struphy_profile(dirs, replace, all, n_lines, print_callers, savefig): ratio.append(str(int(float(t) / runtime * 100)) + "%") # strong scaling plot - if xp.all([Nel == val["Nel"][0] for Nel in val["Nel"]]): + if np.all([Nel == val["Nel"][0] for Nel in val["Nel"]]): # ideal scaling if n == 0: - ax.loglog(val["mpi_size"], 1 / 2 ** xp.arange(len(val["time"])), "k--", alpha=0.3, label="ideal") + ax.loglog(val["mpi_size"], 1 / 2 ** np.arange(len(val["time"])), "k--", alpha=0.3, label="ideal") # print average time per one time step if "integrate" in key: @@ -206,11 +206,11 @@ def struphy_profile(dirs, replace, all, n_lines, print_callers, savefig): ax.set_ylabel("time [s]") ax.set( title="Weak scaling for cells/mpi_size=" - + str(xp.prod(val["Nel"][0]) / val["mpi_size"][0]) - + "=const.", + + str(np.prod(val["Nel"][0]) / val["mpi_size"][0]) + + "=const." 
) ax.legend(loc="upper left") - # ax.loglog(val['mpi_size'], val['time'][0]*xp.ones_like(val['time']), 'k--', alpha=0.3) + # ax.loglog(val['mpi_size'], val['time'][0]*np.ones_like(val['time']), 'k--', alpha=0.3) ax.set_xscale("log") if savefig is None: diff --git a/src/struphy/console/run.py b/src/struphy/console/run.py index 3fa8ee56d..1845230e9 100644 --- a/src/struphy/console/run.py +++ b/src/struphy/console/run.py @@ -210,7 +210,7 @@ def struphy_run( if likwid: command = likwid_command + command + ["--likwid"] - print(f"Running with likwid with {likwid_repetitions =}") + print(f"Running with likwid with {likwid_repetitions = }") f.write(f"# Launching likwid {likwid_repetitions} times with likwid-mpirun\n") for i in range(likwid_repetitions): f.write(f"\n\n# Run number {i + 1:03}\n") diff --git a/src/struphy/console/test.py b/src/struphy/console/test.py index ab64707e3..804279dc5 100644 --- a/src/struphy/console/test.py +++ b/src/struphy/console/test.py @@ -5,8 +5,11 @@ def struphy_test( group: str, *, mpi: int = 1, + fast: bool = False, with_desc: bool = False, + Tend: float = None, vrbose: bool = False, + verification: bool = False, show_plots: bool = False, nclones: int = 1, ): @@ -16,10 +19,13 @@ def struphy_test( Parameters ---------- group : str - Test identifier: "unit", "models", "fluid", "kinetic", "hybrid", "toy", "verification" or a model name. + Test identifier: "unit", "models", "fluid", "kinetic", "hybrid", "toy" or a model name. mpi : int - Number of MPI processes used in tests (must be >1, default=2). + Number of MPI processes used in tests (default=1). + + fast : bool + Whether to test models just in slab geometry. with_desc : bool Whether to include DESC equilibrium in unit tests (mem consuming). @@ -30,6 +36,9 @@ def struphy_test( vrbose : bool Show full screen output. + verification : bool + Whether to run verification tests specified in io/inp/verification/. + show_plots : bool Show plots of tests. 
""" @@ -42,14 +51,14 @@ def struphy_test( str(mpi), "pytest", "-k", - "not _models and not _tutorial and not pproc and not _verif_", + "not _models and not _tutorial and not pproc", "--with-mpi", ] else: cmd = [ "pytest", "-k", - "not _models and not _tutorial and not pproc and not _verif_", + "not _models and not _tutorial and not pproc", ] if with_desc: @@ -61,18 +70,15 @@ def struphy_test( subp_run(cmd) - elif group in {"models", "fluid", "kinetic", "hybrid", "toy"}: + elif "models" in group: if mpi > 1: cmd = [ "mpirun", - "--oversubscribe", "-n", str(mpi), "pytest", "-k", "_models", - "-m", - group, "-s", "--with-mpi", ] @@ -81,68 +87,103 @@ def struphy_test( "pytest", "-k", "_models", - "-m", - group, "-s", ] + if fast: + cmd += ["--fast"] if vrbose: cmd += ["--vrbose"] + if verification: + cmd += ["--verification"] if nclones > 1: cmd += ["--nclones", f"{nclones}"] if show_plots: cmd += ["--show-plots"] subp_run(cmd) - elif "verification" in group: - if mpi > 1: - cmd = [ - "mpirun", - "--oversubscribe", - "-n", - str(mpi), - "pytest", - "-k", - "_verif_", - "-s", - "--with-mpi", - ] - else: + # test post processing of models + if not verification: cmd = [ "pytest", "-k", - "_verif_", + "pproc", "-s", ] + subp_run(cmd) - if vrbose: - cmd += ["--vrbose"] - if nclones > 1: - cmd += ["--nclones", f"{nclones}"] - if show_plots: - cmd += ["--show-plots"] - subp_run(cmd) - - else: + elif group in {"fluid", "kinetic", "hybrid", "toy"}: cmd = [ "mpirun", - "--oversubscribe", "-n", str(mpi), "pytest", "-k", - "_models", - "-m", - "single", + group + "_models", "-s", "--with-mpi", - "--model-name", - group, ] + if fast: + cmd += ["--fast"] if vrbose: cmd += ["--vrbose"] + if verification: + cmd += ["--verification"] if nclones > 1: cmd += ["--nclones", f"{nclones}"] if show_plots: cmd += ["--show-plots"] subp_run(cmd) + + if not verification: + from struphy.models.tests.test_xxpproc import test_pproc_codes + + test_pproc_codes(group=group) + + else: + import os + import pickle + + import struphy + + libpath = struphy.__path__[0] + + with open(os.path.join(libpath, "models", "models_message"), "rb") as fp: + model_message, fluid_message, kinetic_message, hybrid_message, toy_message = pickle.load( + fp, + ) + + if group in toy_message: + mtype = "toy" + elif group in fluid_message: + mtype = "fluid" + elif group in kinetic_message: + mtype = "kinetic" + elif group in hybrid_message: + mtype = "hybrid" + else: + raise ValueError(f"{group} is not a valid model name.") + + py_file = os.path.join(libpath, "models", "tests", "util.py") + + cmd = [ + "mpirun", + "-n", + str(mpi), + "python3", + py_file, + mtype, + group, + str(Tend), + str(fast), + str(vrbose), + str(verification), + str(nclones), + str(show_plots), + ] + subp_run(cmd) + + if not verification: + from struphy.models.tests.test_xxpproc import test_pproc_codes + + test_pproc_codes(group=mtype) diff --git a/src/struphy/console/tests/test_console.py b/src/struphy/console/tests/test_console.py index c94e41eb2..ae31eb6d2 100644 --- a/src/struphy/console/tests/test_console.py +++ b/src/struphy/console/tests/test_console.py @@ -7,6 +7,7 @@ import pytest # from psydac.ddm.mpi import mpi as MPI +import struphy import struphy as struphy_lib from struphy.console.compile import struphy_compile from struphy.console.main import struphy @@ -77,7 +78,7 @@ def split_command(command): # ["units", "Maxwell", "--input-abs", "/params.yml"], # Test cases for 'params' sub-command ["params", "Maxwell"], - ["params", "Vlasov"], + ["params", "Vlasov", 
"--options"], # ["params", "Maxwell", "-f", "params_Maxwell.yml"], # Test cases for 'profile' sub-command ["profile", "sim_1"], @@ -92,7 +93,7 @@ def split_command(command): # Test cases for 'test' sub-command ["test", "models"], ["test", "unit"], - ["test", "Maxwell"], + ["test", "Maxwell", "--Tend", "1.0"], ["test", "hybrid", "--mpi", "8"], ], ) @@ -289,7 +290,7 @@ def mock_remove(path): # Otherwise, we will not remove all the *_tmp.py files # We can not use the real os.remove becuase then # the state and all compiled files will be removed - print(f"{path =}") + print(f"{path = }") if "_tmp.py" in path: print("Not mock remove") os_remove(path) @@ -317,19 +318,19 @@ def mock_remove(path): time_execution=time_execution, yes=yes, ) - print(f"{language =}") - print(f"{compiler =}") - print(f"{omp_pic =}") - print(f"{omp_feec =}") - print(f"{delete =}") + print(f"{language = }") + print(f"{compiler = }") + print(f"{omp_pic = }") + print(f"{omp_feec = }") + print(f"{delete = }") print(f"{status} = ") - print(f"{verbose =}") - print(f"{dependencies =}") - print(f"{time_execution =}") - print(f"{yes =}") - print(f"{mock_save_state.call_count =}") - print(f"{mock_subprocess_run.call_count =}") - print(f"{mock_os_remove.call_count =}") + print(f"{verbose = }") + print(f"{dependencies = }") + print(f"{time_execution = }") + print(f"{yes = }") + print(f"{mock_save_state.call_count = }") + print(f"{mock_subprocess_run.call_count = }") + print(f"{mock_os_remove.call_count = }") if delete: print("if delete") @@ -359,9 +360,10 @@ def mock_remove(path): @pytest.mark.parametrize("model", ["Maxwell"]) @pytest.mark.parametrize("file", ["params_Maxwell.yml", "params_Maxwel2.yml"]) @pytest.mark.parametrize("yes", [True]) -def test_struphy_params(tmp_path, model, file, yes): +@pytest.mark.parametrize("options", [True, False]) +def test_struphy_params(tmp_path, model, file, yes, options): file_path = os.path.join(tmp_path, file) - struphy_params(model, str(file_path), yes=yes) + struphy_params(model, str(file_path), yes=yes, options=options) @pytest.mark.mpi_skip diff --git a/src/struphy/diagnostics/console_diagn.py b/src/struphy/diagnostics/console_diagn.py index e110d1497..35294b37a 100644 --- a/src/struphy/diagnostics/console_diagn.py +++ b/src/struphy/diagnostics/console_diagn.py @@ -5,13 +5,13 @@ import os import subprocess -import cunumpy as xp import h5py import yaml import struphy import struphy.utils.utils as utils from struphy.diagnostics.diagn_tools import plot_distr_fun, plot_scalars, plots_videos_2d +from struphy.utils.arrays import xp as np def main(): @@ -301,7 +301,7 @@ def main(): bckgr_fun = getattr(maxwellians, default_bckgr_type)() # Get values of background shifts in velocity space - positions = [xp.array([grid_slices["e" + str(k)]]) for k in range(1, 4)] + positions = [np.array([grid_slices["e" + str(k)]]) for k in range(1, 4)] u = bckgr_fun.u(*positions) eval_params = {"u" + str(k + 1): u[k][0] for k in range(3)} @@ -315,7 +315,7 @@ def main(): # Plot the distribution function if "plot_distr" in actions: # Get index of where to plot in time - time_idx = xp.argmin(xp.abs(time - saved_time)) + time_idx = np.argmin(np.abs(time - saved_time)) plot_distr_fun( path=os.path.join( diff --git a/src/struphy/diagnostics/continuous_spectra.py b/src/struphy/diagnostics/continuous_spectra.py index c31d789c7..cbe42dc94 100644 --- a/src/struphy/diagnostics/continuous_spectra.py +++ b/src/struphy/diagnostics/continuous_spectra.py @@ -37,9 +37,8 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, 
diff --git a/src/struphy/diagnostics/console_diagn.py b/src/struphy/diagnostics/console_diagn.py
index e110d1497..35294b37a 100644
--- a/src/struphy/diagnostics/console_diagn.py
+++ b/src/struphy/diagnostics/console_diagn.py
@@ -5,13 +5,13 @@
 import os
 import subprocess
 
-import cunumpy as xp
 import h5py
 import yaml
 
 import struphy
 import struphy.utils.utils as utils
 from struphy.diagnostics.diagn_tools import plot_distr_fun, plot_scalars, plots_videos_2d
+from struphy.utils.arrays import xp as np
 
 
 def main():
@@ -301,7 +301,7 @@ def main():
             bckgr_fun = getattr(maxwellians, default_bckgr_type)()
 
             # Get values of background shifts in velocity space
-            positions = [xp.array([grid_slices["e" + str(k)]]) for k in range(1, 4)]
+            positions = [np.array([grid_slices["e" + str(k)]]) for k in range(1, 4)]
             u = bckgr_fun.u(*positions)
             eval_params = {"u" + str(k + 1): u[k][0] for k in range(3)}
@@ -315,7 +315,7 @@ def main():
         # Plot the distribution function
         if "plot_distr" in actions:
             # Get index of where to plot in time
-            time_idx = xp.argmin(xp.abs(time - saved_time))
+            time_idx = np.argmin(np.abs(time - saved_time))
 
             plot_distr_fun(
                 path=os.path.join(
diff --git a/src/struphy/diagnostics/continuous_spectra.py b/src/struphy/diagnostics/continuous_spectra.py
index c31d789c7..cbe42dc94 100644
--- a/src/struphy/diagnostics/continuous_spectra.py
+++ b/src/struphy/diagnostics/continuous_spectra.py
@@ -37,9 +37,8 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
         the radial location s_spec[m][0], squared eigenfrequencies s_spec[m][1] and global mode index
         s_spec[m][2] corresponding to slow sound modes for each poloidal mode number m in m_range.
     """
-    import cunumpy as xp
-
     import struphy.bsplines.bsplines as bsp
+    from struphy.utils.arrays import xp as np
 
     # greville points in radial direction (s)
     gN_1 = bsp.greville(space.T[0], space.p[0], space.spl_kind[0])
@@ -50,7 +49,7 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
     gD_2 = bsp.greville(space.t[1], space.p[1] - 1, space.spl_kind[1])
 
     # poloidal mode numbers
-    ms = xp.arange(m_range[1] - m_range[0] + 1) + m_range[0]
+    ms = np.arange(m_range[1] - m_range[0] + 1) + m_range[0]
 
     # grid for normalized Jacobian determinant
     det_df = domain.jacobian_det(gD_1, gD_2, 0.0)
@@ -66,7 +65,7 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
     s_spec = [[[], [], []] for m in ms]
 
     # only consider eigenmodes in range omega^2/omega_A^2 = [0, 1]
-    modes_ind = xp.where((xp.real(omega2) / omega_A**2 < 1.0) & (xp.real(omega2) / omega_A**2 > 0.0))[0]
+    modes_ind = np.where((np.real(omega2) / omega_A**2 < 1.0) & (np.real(omega2) / omega_A**2 > 0.0))[0]
 
     for i in range(modes_ind.size):
         # determine whether it's an Alfvén branch or sound branch by checking DIV(U)
@@ -86,14 +85,14 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
             U2_1_coeff = (U2_1_coeff[:, :, 0] - 1j * U2_1_coeff[:, :, 1]) / 2
 
             # determine radial location of singularity by looking for a peak in eigenfunction U2_1
-            s_ind = xp.unravel_index(xp.argmax(abs(U2_1_coeff)), U2_1_coeff.shape)[0]
+            s_ind = np.unravel_index(np.argmax(abs(U2_1_coeff)), U2_1_coeff.shape)[0]
             s = gN_1[s_ind]
 
             # perform fft to determine m
-            U2_1_fft = xp.fft.fft(U2_1_coeff)
+            U2_1_fft = np.fft.fft(U2_1_coeff)
 
             # determine m by looking for peak in Fourier spectrum at singularity
-            m = int((xp.fft.fftfreq(U2_1_fft[s_ind].size) * U2_1_fft[s_ind].size)[xp.argmax(abs(U2_1_fft[s_ind]))])
+            m = int((np.fft.fftfreq(U2_1_fft[s_ind].size) * U2_1_fft[s_ind].size)[np.argmax(abs(U2_1_fft[s_ind]))])
 
             ## perform shift for negative m
             # if m >= (space.Nel[1] + 1)//2:
@@ -103,7 +102,7 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
             for j in range(ms.size):
                 if ms[j] == m:
                     a_spec[j][0].append(s)
-                    a_spec[j][1].append(xp.real(omega2[modes_ind[i]]))
+                    a_spec[j][1].append(np.real(omega2[modes_ind[i]]))
                     a_spec[j][2].append(modes_ind[i])
 
         # Sound branch
@@ -117,14 +116,14 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
             U2_coeff = (U2_coeff[:, :, 0] - 1j * U2_coeff[:, :, 1]) / 2
 
             # determine radial location of singularity by looking for a peak in eigenfunction (U2_2 or U2_3)
-            s_ind = xp.unravel_index(xp.argmax(abs(U2_coeff)), U2_coeff.shape)[0]
+            s_ind = np.unravel_index(np.argmax(abs(U2_coeff)), U2_coeff.shape)[0]
             s = gD_1[s_ind]
 
             # perform fft to determine m
-            U2_fft = xp.fft.fft(U2_coeff)
+            U2_fft = np.fft.fft(U2_coeff)
 
             # determine m by looking for peak in Fourier spectrum at singularity
-            m = int((xp.fft.fftfreq(U2_fft[s_ind].size) * U2_fft[s_ind].size)[xp.argmax(abs(U2_fft[s_ind]))])
+            m = int((np.fft.fftfreq(U2_fft[s_ind].size) * U2_fft[s_ind].size)[np.argmax(abs(U2_fft[s_ind]))])
 
             ## perform shift for negative m
             # if m >= (space.Nel[1] + 1)//2:
@@ -134,13 +133,13 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol,
             for j in range(ms.size):
                 if ms[j] == m:
                     s_spec[j][0].append(s)
-
s_spec[j][1].append(xp.real(omega2[modes_ind[i]])) + s_spec[j][1].append(np.real(omega2[modes_ind[i]])) s_spec[j][2].append(modes_ind[i]) # convert to array for j in range(ms.size): - a_spec[j] = xp.array(a_spec[j]) - s_spec[j] = xp.array(s_spec[j]) + a_spec[j] = np.array(a_spec[j]) + s_spec[j] = np.array(s_spec[j]) return a_spec, s_spec @@ -152,12 +151,13 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol, import os import shutil - import cunumpy as xp import yaml + from struphy.utils.arrays import xp as np + # parse arguments parser = argparse.ArgumentParser( - description="Looks for eigenmodes in a given MHD eigenspectrum in a certain poloidal mode number range and plots the continuous shear Alfvén and slow sound spectra (frequency versus radial-like coordinate).", + description="Looks for eigenmodes in a given MHD eigenspectrum in a certain poloidal mode number range and plots the continuous shear Alfvén and slow sound spectra (frequency versus radial-like coordinate)." ) parser.add_argument("m_l_alfvén", type=int, help="lower bound of poloidal mode number range for Alfvénic modes") @@ -252,16 +252,11 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol, fem_1d_2 = Spline_space_1d(Nel[1], p[1], spl_kind[1], nq_el[1], dirichlet_bc[1]) fem_2d = Tensor_spline_space( - [fem_1d_1, fem_1d_2], - polar_ck, - domain.cx[:, :, 0], - domain.cy[:, :, 0], - n_tor=n_tor, - basis_tor="i", + [fem_1d_1, fem_1d_2], polar_ck, domain.cx[:, :, 0], domain.cy[:, :, 0], n_tor=n_tor, basis_tor="i" ) # load and analyze spectrum - omega2, U2_eig = xp.split(xp.load(spec_path), [1], axis=0) + omega2, U2_eig = np.split(np.load(spec_path), [1], axis=0) omega2 = omega2.flatten() m_range_alfven = [args.m_l_alfvén, args.m_u_alfvén] @@ -287,7 +282,7 @@ def get_mhd_continua_2d(space, domain, omega2, U_eig, m_range, omega_A, div_tol, fig.set_figheight(12) fig.set_figwidth(14) - etaplot = [xp.linspace(0.0, 1.0, 201), xp.linspace(0.0, 1.0, 101)] + etaplot = [np.linspace(0.0, 1.0, 201), np.linspace(0.0, 1.0, 101)] etaplot[0][0] += 1e-5 diff --git a/src/struphy/diagnostics/diagn_tools.py b/src/struphy/diagnostics/diagn_tools.py index e7a9d8ee3..8ab8ec36a 100644 --- a/src/struphy/diagnostics/diagn_tools.py +++ b/src/struphy/diagnostics/diagn_tools.py @@ -3,7 +3,6 @@ import shutil import subprocess -import cunumpy as xp import matplotlib.colors as colors import matplotlib.pyplot as plt from scipy.fft import fftfreq, fftn @@ -11,25 +10,23 @@ from tqdm import tqdm from struphy.dispersion_relations import analytic +from struphy.utils.arrays import xp as np def power_spectrum_2d( - values: dict, - name: str, - grids: tuple, - grids_mapped: tuple = None, - component: int = 0, - slice_at: tuple = (None, 0, 0), - do_plot: bool = False, - disp_name: str = None, - disp_params: dict = {}, - fit_branches: int = 0, - noise_level: float = 0.1, - extr_order: int = 10, - fit_degree: tuple = (1,), - save_plot: bool = False, - save_name: str = None, - file_format: str = "png", + values, + name, + code, + grids, + grids_mapped=None, + component=0, + slice_at=(None, 0, 0), + do_plot=False, + disp_name=None, + disp_params={}, + save_plot=False, + save_name=None, + file_format="png", ): """Perform fft in space-time, (t, x) -> (omega, k), where x can be a logical or physical coordinate. Returns values if plot=False. 
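A note on the space-time FFT described in the docstring above: a minimal, self-contained sketch (not part of the patch; grid sizes and mode indices are illustrative assumptions) of the normalization and half-spectrum slicing that power_spectrum_2d applies, checked on a synthetic plane wave:

import numpy as np
from scipy.fft import fftfreq, fftn

Nt, Nx, dt, dx = 128, 64, 0.1, 0.2
t = np.arange(Nt) * dt
x = np.arange(Nx) * dx
w0 = 2 * np.pi * fftfreq(Nt, dt)[10]  # an exactly resolved frequency
k0 = 2 * np.pi * fftfreq(Nx, dx)[5]   # an exactly resolved wave number
data = np.cos(w0 * t[:, None] + k0 * x[None, :])  # shape (Nt, Nx)

# same normalization and half-spectrum slicing as in power_spectrum_2d
dispersion = (2.0 / Nt) * (2.0 / Nx) * np.abs(fftn(data))[: Nt // 2, : Nx // 2]
omega = 2 * np.pi * fftfreq(Nt, dt)[: Nt // 2]
kvec = 2 * np.pi * fftfreq(Nx, dx)[: Nx // 2]

# the spectral peak lands at the injected (omega, k) pair
it, ik = np.unravel_index(np.argmax(dispersion), dispersion.shape)
assert np.isclose(omega[it], w0) and np.isclose(kvec[ik], k0)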
@@ -37,22 +34,25 @@ def power_spectrum_2d( Parameters ---------- values : dict - Dictionary holding values of a B-spline FemField on the grid as 3d xp.arrays: + Dictionary holding values of a B-spline FemField on the grid as 3d np.arrays: values[n] contains the values at time step n, where n = 0:Nt-1:step with 0 0: - assert len(fit_degree) == fit_branches - # determine maxima for each k - k_start = kvec.size // 8 # take only first half of k-vector - k_end = kvec.size // 2 # take only first half of k-vector - k_fit = [] - omega_fit = {} - for n in range(fit_branches): - omega_fit[n] = [] - for k, f_of_omega in zip(kvec[k_start:k_end], dispersion[:, k_start:k_end].T): - threshold = xp.max(f_of_omega) * noise_level - extrms = argrelextrema(f_of_omega, xp.greater, order=extr_order)[0] - above_noise = xp.nonzero(f_of_omega > threshold)[0] - intersec = list(set(extrms) & set(above_noise)) - # intersec = list(set(extrms)) - if not intersec: - continue - intersec.sort() - # print(f"{intersec = }") - # print(f"{[omega[intersec[n]] for n in range(fit_branches)]}") - assert len(intersec) == fit_branches, ( - f"Number of found branches {len(intersec)} is not {fit_branches =}! \ - Try to lower 'noise_level' or increase 'extr_order'." - ) - k_fit += [k] - for n in range(fit_branches): - omega_fit[n] += [omega[intersec[n]]] + print(f"space step: {dx}") + assert np.allclose(grid[1:] - grid[:-1], dx * np.ones_like(grid[:-1])) - # fit - coeffs = [] - for m, om in omega_fit.items(): - coeffs += [xp.polyfit(k_fit, om, deg=fit_degree[n])] - print(f"\nFitted {coeffs =}") + dispersion = (2.0 / Nt) * (2.0 / Nx) * np.abs(fftn(data))[: Nt // 2, : Nx // 2] + kvec = 2 * np.pi * fftfreq(Nx, dx)[: Nx // 2] + omega = 2 * np.pi * fftfreq(Nt, dt)[: Nt // 2] if do_plot: _, ax = plt.subplots(1, 1, figsize=(10, 10)) colormap = "plasma" - K, W = xp.meshgrid(kvec, omega) - lvls = xp.logspace(-15, -1, 27) + K, W = np.meshgrid(kvec, omega) + lvls = np.logspace(-15, -1, 27) disp_plot = ax.contourf( K, W, @@ -204,22 +156,11 @@ def power_spectrum_2d( mappable=disp_plot, format="%.0e", ) - title = name + ", component " + str(component + 1) + title = name + " component " + str(component + 1) + " from code: " + code ax.set_title(title) ax.set_xlabel("$k$ [a.u.]") ax.set_ylabel(r"$\omega$ [a.u.]") - if fit_branches > 0: - for n, cs in enumerate(coeffs): - - def fun(k): - out = k * 0.0 - for i, c in enumerate(xp.flip(cs)): - out += c * k**i - return out - - ax.plot(kvec, fun(kvec), "r:", label=f"fit_{n + 1}") - # analytic solution: disp_class = getattr(analytic, disp_name) disp = disp_class(**disp_params) @@ -230,12 +171,12 @@ def fun(k): set_min = 0.0 set_max = 0.0 for key, branch in branches.items(): - vals = xp.real(branch) + vals = np.real(branch) ax.plot(kvec, vals, "--", label=key) - tmp = xp.min(vals) + tmp = np.min(vals) if tmp < set_min: set_min = tmp - tmp = xp.max(vals) + tmp = np.max(vals) if tmp > set_max: set_max = tmp @@ -249,7 +190,8 @@ def fun(k): else: plt.show() - return omega, kvec, dispersion, coeffs + else: + return kvec, omega, dispersion def plot_scalars( @@ -331,8 +273,8 @@ def plot_scalars( plt.figure("en_tot_rel_err") plt.plot( time[1:], - xp.divide( - xp.abs(en_tot[1:] - en_tot[0]), + np.divide( + np.abs(en_tot[1:] - en_tot[0]), en_tot[0], ), ) @@ -363,9 +305,9 @@ def plot_scalars( for key, plot_quantity in plot_quantities.items(): # Get the indices of the extrema if do_fit: - inds_exs = argrelextrema(plot_quantity, xp.greater, order=order) + inds_exs = argrelextrema(plot_quantity, np.greater, order=order) 
+            inds_exs = argrelextrema(plot_quantity, np.greater, order=order)
         elif fit_minima:
-            inds_exs = argrelextrema(plot_quantity, xp.less, order=order)
+            inds_exs = argrelextrema(plot_quantity, np.less, order=order)
         else:
             inds_exs = None
@@ -376,10 +318,10 @@
             # for plotting take a bit more time at start and end
             if len(inds_exs[0]) >= 2:
-                time_start_idx = xp.max(
+                time_start_idx = np.max(
                     [0, 2 * inds_exs[0][start_extremum] - inds_exs[0][start_extremum + 1]],
                 )
-                time_end_idx = xp.min(
+                time_end_idx = np.min(
                     [
                         len(time) - 1,
                         2 * inds_exs[0][start_extremum + no_extrema - 1] - inds_exs[0][start_extremum + no_extrema - 2],
@@ -395,9 +337,9 @@
             if inds_exs is not None:
                 # do the fitting
-                coeffs = xp.polyfit(
+                coeffs = np.polyfit(
                     times_extrema,
-                    xp.log(
+                    np.log(
                         quantity_extrema,
                     ),
                     deg=degree,
@@ -410,15 +352,15 @@
                 )
                 plt.plot(
                     time_cut,
-                    xp.exp(coeffs[0] * time_cut + coeffs[1]),
-                    label=r"$a * \exp(m x)$ with" + f"\na={xp.round(xp.exp(coeffs[1]), 3)} m={xp.round(coeffs[0], 3)}",
+                    np.exp(coeffs[0] * time_cut + coeffs[1]),
+                    label=r"$a * \exp(m x)$ with" + f"\na={np.round(np.exp(coeffs[1]), 3)} m={np.round(coeffs[0], 3)}",
                 )
         else:
             plt.plot(time, plot_quantity[:], ".", label=key, markersize=2)
 
             if inds_exs is not None:
                 # do the fitting
-                coeffs = xp.polyfit(
+                coeffs = np.polyfit(
                     times_extrema,
                     quantity_extrema,
                     deg=degree,
@@ -433,8 +375,8 @@
                 )
                 plt.plot(
                     time_cut,
-                    xp.exp(coeffs[0] * time_cut + coeffs[1]),
-                    label=r"$a x + b$ with" + f"\na={xp.round(coeffs[1], 3)} b={xp.round(coeffs[0], 3)}",
+                    coeffs[0] * time_cut + coeffs[1],
+                    label=r"$a x + b$ with" + f"\na={np.round(coeffs[0], 3)} b={np.round(coeffs[1], 3)}",
                 )
 
     plt.legend()
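For context on the extremum-based fits just above: a standalone sketch (not part of the patch; the test signal, order, and tolerance are illustrative assumptions) of how argrelextrema plus a log-linear np.polyfit recovers the growth rate shown in the a*exp(m x) label:

import numpy as np
from scipy.signal import argrelextrema

# growing oscillation with known envelope rate m = 0.6
time = np.linspace(0.0, 10.0, 2001)
signal = np.exp(0.6 * time) * np.sin(5.0 * time) ** 2

inds = argrelextrema(signal, np.greater, order=10)[0]         # local maxima, as in plot_scalars
coeffs = np.polyfit(time[inds], np.log(signal[inds]), deg=1)  # fit the log of the extrema

# coeffs[0] plays the role of m, np.exp(coeffs[1]) the role of a
assert abs(coeffs[0] - 0.6) < 1e-2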
@@ -496,11 +438,11 @@ def plot_distr_fun(
     # load full distribution functions
     if filename == "f_binned.npy":
-        f = xp.load(filepath)
+        f = np.load(filepath)
 
     # load delta f
     elif filename == "delta_f_binned.npy":
-        delta_f = xp.load(filepath)
+        delta_f = np.load(filepath)
 
     assert f is not None, "No distribution function file found!"
@@ -508,7 +450,7 @@
     directions = folder.split("_")
     for direction in directions:
         grids += [
-            xp.load(
+            np.load(
                 os.path.join(
                     subpath,
                     "grid_" + direction + ".npy",
@@ -519,8 +461,8 @@
     # Get indices of where to plot in other directions
     grid_idxs = {}
     for k in range(f.ndim - 1):
-        grid_idxs[directions[k]] = xp.argmin(
-            xp.abs(grids[k] - grid_slices[directions[k]]),
+        grid_idxs[directions[k]] = np.argmin(
+            np.abs(grids[k] - grid_slices[directions[k]]),
         )
 
     for k in range(f.ndim - 1):
@@ -655,17 +597,17 @@
     grid_idxs = {}
     for k in range(df_data.ndim - 1):
         direc = directions[k]
-        grid_idxs[direc] = xp.argmin(
-            xp.abs(grids[direc] - grid_slices[direc]),
+        grid_idxs[direc] = np.argmin(
+            np.abs(grids[direc] - grid_slices[direc]),
         )
 
-    grid_1 = xp.load(
+    grid_1 = np.load(
         os.path.join(
             data_path,
             "grid_" + label_1 + ".npy",
         ),
     )
-    grid_2 = xp.load(
+    grid_2 = np.load(
         os.path.join(
             data_path,
             "grid_" + label_2 + ".npy",
@@ -683,7 +625,7 @@
     df_binned = df_data[tuple(f_slicing)].squeeze()
 
-    assert t_grid.ndim == grid_1.ndim == grid_2.ndim == 1, "Input arrays must be 1D!"
+    assert t_grid.ndim == grid_1.ndim == grid_2.ndim == 1, f"Input arrays must be 1D!"
     assert df_binned.shape[0] == t_grid.size, f"{df_binned.shape =}, {t_grid.shape =}"
     assert df_binned.shape[1] == grid_1.size, f"{df_binned.shape =}, {grid_1.shape =}"
     assert df_binned.shape[2] == grid_2.size, f"{df_binned.shape =}, {grid_2.shape =}"
@@ -696,9 +638,9 @@
             var *= polar_params["r_max"] - polar_params["r_min"]
             var += polar_params["r_min"]
         elif polar_params["angular_coord"] == sl:
-            var *= 2 * xp.pi
+            var *= 2 * np.pi
 
-    grid_1_mesh, grid_2_mesh = xp.meshgrid(grid_1, grid_2, indexing="ij")
+    grid_1_mesh, grid_2_mesh = np.meshgrid(grid_1, grid_2, indexing="ij")
 
     if output == "video":
         plots_2d_video(
@@ -745,7 +687,7 @@ def video_2d(slc, diagn_path, images_path):
     Parameters
     ----------
-    t_grid : xp.ndarray
+    t_grid : np.ndarray
         1D-array containing all the times
 
     grid_slices : dict
@@ -833,15 +775,15 @@ def plots_2d_video(
     # Get parameters for time and labelling for it
     nt = len(t_grid)
-    log_nt = int(xp.log10(nt)) + 1
+    log_nt = int(np.log10(nt)) + 1
     len_dt = len(str(t_grid[1]).split(".")[1])
 
     # Get the correct scale for the plots
-    vmin += [xp.min(df_binned[:]) / 3]
-    vmax += [xp.max(df_binned[:]) / 3]
-    vmin = xp.min(vmin)
-    vmax = xp.max(vmax)
-    vscale = xp.max(xp.abs([vmin, vmax]))
+    vmin += [np.min(df_binned[:]) / 3]
+    vmax += [np.max(df_binned[:]) / 3]
+    vmin = np.min(vmin)
+    vmax = np.max(vmax)
+    vscale = np.max(np.abs([vmin, vmax]))
 
     # Set up the figure and axis once
     if do_polar:
@@ -939,18 +881,18 @@ def plots_2d_overview(
         fig_height = 8.5
     else:
         n_cols = 3
-        n_rows = int(xp.ceil(n_times / n_cols))
+        n_rows = int(np.ceil(n_times / n_cols))
         fig_height = 4 * n_rows
 
     fig_size = (4 * n_cols, fig_height)
 
     # Get the correct scale for the plots
     for time in times:
-        vmin += [xp.min(df_binned[time]) / 3]
-        vmax += [xp.max(df_binned[time]) / 3]
-    vmin = xp.min(vmin)
-    vmax = xp.max(vmax)
-    vscale = xp.max(xp.abs([vmin, vmax]))
+        vmin += [np.min(df_binned[time]) / 3]
+        vmax += [np.max(df_binned[time]) / 3]
+    vmin = np.min(vmin)
+    vmax = np.max(vmax)
+    vscale = np.max(np.abs([vmin, vmax]))
 
     # Plot options for polar plots
     subplot_kw = dict(projection="polar") if do_polar else None
@@ -959,8 +901,8 @@
     fig, axes = plt.subplots(n_rows, n_cols, figsize=fig_size, subplot_kw=subplot_kw)
 
     # So we can use .flatten() even for just 1 plot
-    if not isinstance(axes, xp.ndarray):
-        axes = xp.array([axes])
+    if not isinstance(axes, np.ndarray):
+        axes = np.array([axes])
 
     # fig.tight_layout(h_pad=5.0, w_pad=5.0)
     # fig.tight_layout(pad=5.0)
@@ -976,7 +918,7 @@
     # Set the suptitle
     fig.suptitle(f"Struphy model '{model_name}'")
 
-    for k in xp.arange(n_times):
+    for k in np.arange(n_times):
         obj = axes.flatten()[k]
         n = times[k]
         t = f"%.{len_dt}f" % t_grid[n]
@@ -1048,13 +990,13 @@ def get_slices_grids_directions_and_df_data(plot_full_f, grid_slices, data_path,
     slices_2d : list[string]
         A list of all the slicings
 
-    grids : list[xp.ndarray]
+    grids : list[np.ndarray]
         A list of all grids according to the slices
 
     directions : list[string]
         A list of the directions that appear in all slices
 
-    df_data : xp.ndarray
+    df_data : np.ndarray
         The data of delta-f (in case of full-f: distribution function minus background)
     """
 
@@ -1063,7 +1005,7 @@
     # Load all the grids
     grids = {}
     for direction in directions:
-        grids[direction] = xp.load(
+        grids[direction] = np.load(
             os.path.join(data_path, "grid_" + direction + ".npy"),
         )
 
@@ -1072,7 +1014,7 @@ def
get_slices_grids_directions_and_df_data(plot_full_f, grid_slices, data_path, _name = "f_binned.npy" else: _name = "delta_f_binned.npy" - _data = xp.load(os.path.join(data_path, _name)) + _data = np.load(os.path.join(data_path, _name)) # Check how many slicings have been given and make slices_2d for all # combinations of spatial and velocity dimensions diff --git a/src/struphy/diagnostics/diagnostics_pic.ipynb b/src/struphy/diagnostics/diagnostics_pic.ipynb index f41425141..d4b2f2e0f 100644 --- a/src/struphy/diagnostics/diagnostics_pic.ipynb +++ b/src/struphy/diagnostics/diagnostics_pic.ipynb @@ -7,13 +7,11 @@ "outputs": [], "source": [ "import os\n", - "\n", + "import struphy\n", "import numpy as np\n", "from matplotlib import pyplot as plt\n", "\n", - "import struphy\n", - "\n", - "path_out = os.path.join(struphy.__path__[0], \"io/out\", \"sim_1\")\n", + "path_out = os.path.join(struphy.__path__[0], 'io/out', 'sim_1')\n", "\n", "print(path_out)\n", "os.listdir(path_out)" @@ -30,7 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "data_path = os.path.join(path_out, \"post_processing\")\n", + "data_path = os.path.join(path_out, 'post_processing')\n", "\n", "os.listdir(data_path)" ] @@ -41,7 +39,7 @@ "metadata": {}, "outputs": [], "source": [ - "t_grid = np.load(os.path.join(data_path, \"t_grid.npy\"))\n", + "t_grid = np.load(os.path.join(data_path, 't_grid.npy'))\n", "t_grid" ] }, @@ -51,7 +49,7 @@ "metadata": {}, "outputs": [], "source": [ - "f_path = os.path.join(data_path, \"kinetic_data\", \"ions\", \"distribution_function\")\n", + "f_path = os.path.join(data_path, 'kinetic_data', 'ions', 'distribution_function')\n", "\n", "print(os.listdir(f_path))" ] @@ -62,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "path = os.path.join(f_path, \"e1\")\n", + "path = os.path.join(f_path, 'e1')\n", "print(os.listdir(path))" ] }, @@ -72,9 +70,9 @@ "metadata": {}, "outputs": [], "source": [ - "grid = np.load(os.path.join(f_path, \"e1/\", \"grid_e1.npy\"))\n", - "f_binned = np.load(os.path.join(f_path, \"e1/\", \"f_binned.npy\"))\n", - "delta_f_e1_binned = np.load(os.path.join(f_path, \"e1/\", \"delta_f_binned.npy\"))\n", + "grid = np.load(os.path.join(f_path, 'e1/', 'grid_e1.npy'))\n", + "f_binned = np.load(os.path.join(f_path, 'e1/', 'f_binned.npy'))\n", + "delta_f_e1_binned = np.load(os.path.join(f_path, 'e1/', 'delta_f_binned.npy'))\n", "\n", "print(grid.shape)\n", "print(f_binned.shape)\n", @@ -89,18 +87,18 @@ "source": [ "steps = list(np.arange(10))\n", "\n", - "plt.figure(figsize=(12, 5 * len(steps)))\n", + "plt.figure(figsize=(12, 5*len(steps)))\n", "for n, step in enumerate(steps):\n", - " plt.subplot(len(steps), 2, 2 * n + 1)\n", - " plt.plot(grid, f_binned[step], label=f\"time = {t_grid[step]}\")\n", - " plt.xlabel(\"e1\")\n", - " # plt.ylim([.5, 1.5])\n", - " plt.title(\"full-f\")\n", - " plt.subplot(len(steps), 2, 2 * n + 2)\n", - " plt.plot(grid, delta_f_e1_binned[step], label=f\"time = {t_grid[step]}\")\n", - " plt.xlabel(\"e1\")\n", - " # plt.ylim([-3e-3, 3e-3])\n", - " plt.title(r\"$\\delta f$\")\n", + " plt.subplot(len(steps), 2, 2*n + 1)\n", + " plt.plot(grid, f_binned[step], label=f'time = {t_grid[step]}')\n", + " plt.xlabel('e1')\n", + " #plt.ylim([.5, 1.5])\n", + " plt.title('full-f')\n", + " plt.subplot(len(steps), 2, 2*n + 2)\n", + " plt.plot(grid, delta_f_e1_binned[step], label=f'time = {t_grid[step]}')\n", + " plt.xlabel('e1')\n", + " #plt.ylim([-3e-3, 3e-3])\n", + " plt.title(r'$\\delta f$')\n", " plt.legend()" ] }, @@ -110,7 +108,7 @@ "metadata": {}, "outputs": 
[], "source": [ - "path = os.path.join(f_path, \"e1_v1\")\n", + "path = os.path.join(f_path, 'e1_v1')\n", "print(os.listdir(path))" ] }, @@ -120,10 +118,10 @@ "metadata": {}, "outputs": [], "source": [ - "grid_e1 = np.load(os.path.join(f_path, \"e1_v1/\", \"grid_e1.npy\"))\n", - "grid_v1 = np.load(os.path.join(f_path, \"e1_v1/\", \"grid_v1.npy\"))\n", - "f_binned = np.load(os.path.join(f_path, \"e1_v1/\", \"f_binned.npy\"))\n", - "delta_f_binned = np.load(os.path.join(f_path, \"e1_v1/\", \"delta_f_binned.npy\"))\n", + "grid_e1 = np.load(os.path.join(f_path, 'e1_v1/', 'grid_e1.npy'))\n", + "grid_v1 = np.load(os.path.join(f_path, 'e1_v1/', 'grid_v1.npy'))\n", + "f_binned = np.load(os.path.join(f_path, 'e1_v1/', 'f_binned.npy'))\n", + "delta_f_binned = np.load(os.path.join(f_path, 'e1_v1/', 'delta_f_binned.npy'))\n", "\n", "print(grid_e1.shape)\n", "print(grid_v1.shape)\n", @@ -139,20 +137,20 @@ "source": [ "steps = list(np.arange(10))\n", "\n", - "plt.figure(figsize=(12, 5 * len(steps)))\n", + "plt.figure(figsize=(12, 5*len(steps)))\n", "for n, step in enumerate(steps):\n", - " plt.subplot(len(steps), 2, 2 * n + 1)\n", - " plt.pcolor(grid_e1, grid_v1, f_binned[step].T, label=f\"time = {t_grid[step]}\")\n", - " plt.xlabel(\"$e1$\")\n", - " plt.ylabel(r\"$v_\\parallel$\")\n", - " plt.title(\"full-f\")\n", + " plt.subplot(len(steps), 2, 2*n + 1)\n", + " plt.pcolor(grid_e1, grid_v1, f_binned[step].T, label=f'time = {t_grid[step]}')\n", + " plt.xlabel('$e1$')\n", + " plt.ylabel(r'$v_\\parallel$')\n", + " plt.title('full-f')\n", " plt.legend()\n", " plt.colorbar()\n", - " plt.subplot(len(steps), 2, 2 * n + 2)\n", - " plt.pcolor(grid_e1, grid_v1, delta_f_binned[step].T, label=f\"time = {t_grid[step]}\")\n", - " plt.xlabel(\"$e1$\")\n", - " plt.ylabel(r\"$v_\\parallel$\")\n", - " plt.title(r\"$\\delta f$\")\n", + " plt.subplot(len(steps), 2, 2*n + 2)\n", + " plt.pcolor(grid_e1, grid_v1, delta_f_binned[step].T, label=f'time = {t_grid[step]}')\n", + " plt.xlabel('$e1$')\n", + " plt.ylabel(r'$v_\\parallel$')\n", + " plt.title(r'$\\delta f$')\n", " plt.legend()\n", " plt.colorbar()" ] @@ -163,7 +161,7 @@ "metadata": {}, "outputs": [], "source": [ - "fields_path = os.path.join(data_path, \"fields_data\")\n", + "fields_path = os.path.join(data_path, 'fields_data')\n", "\n", "print(os.listdir(fields_path))" ] @@ -176,7 +174,7 @@ "source": [ "import pickle\n", "\n", - "with open(os.path.join(fields_path, \"grids_phy.bin\"), \"rb\") as file:\n", + "with open(os.path.join(fields_path, 'grids_phy.bin'), 'rb') as file:\n", " x_grid, y_grid, z_grid = pickle.load(file)\n", "\n", "print(type(x_grid))\n", @@ -189,7 +187,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(os.path.join(fields_path, \"em_fields\", \"phi_phy.bin\"), \"rb\") as file:\n", + "with open(os.path.join(fields_path, 'em_fields', 'phi_phy.bin'), 'rb') as file:\n", " phi = pickle.load(file)\n", "\n", "plt.figure(figsize=(12, 12))\n", @@ -199,9 +197,9 @@ " t = t_grid[step]\n", " print(phi[t][0].shape)\n", " plt.subplot(2, 2, n + 1)\n", - " plt.plot(x_grid[:, 0, 0], phi[t][0][:, 0, 0], label=f\"time = {t}\")\n", - " plt.xlabel(\"x\")\n", - " plt.ylabel(r\"$\\phi$(x)\")\n", + " plt.plot(x_grid[:, 0, 0], phi[t][0][:, 0, 0], label=f'time = {t}')\n", + " plt.xlabel('x')\n", + " plt.ylabel(r'$\\phi$(x)')\n", " plt.legend()" ] }, diff --git a/src/struphy/diagnostics/paraview/mesh_creator.py b/src/struphy/diagnostics/paraview/mesh_creator.py index 4bf83211c..0a8a35903 100644 --- a/src/struphy/diagnostics/paraview/mesh_creator.py +++ 
b/src/struphy/diagnostics/paraview/mesh_creator.py @@ -1,10 +1,11 @@ # from tqdm import tqdm -import cunumpy as xp import vtkmodules.all as vtk from vtkmodules.util.numpy_support import numpy_to_vtk as np2vtk from vtkmodules.util.numpy_support import vtk_to_numpy as vtk2np from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid +from struphy.utils.arrays import xp as np + def make_ugrid_and_write_vtu(filename: str, writer, vtk_dir, gvec, s_range, u_range, v_range, periodic): """A helper function to orchestrate operations to run many test cases. @@ -37,12 +38,7 @@ def make_ugrid_and_write_vtu(filename: str, writer, vtk_dir, gvec, s_range, u_ra point_data = {} cell_data = {} vtk_points, suv_points, xyz_points, point_indices = gen_vtk_points( - gvec, - s_range, - u_range, - v_range, - point_data, - cell_data, + gvec, s_range, u_range, v_range, point_data, cell_data ) print("vtk_points.GetNumberOfPoints()", vtk_points.GetNumberOfPoints(), flush=True) @@ -85,43 +81,43 @@ def gen_vtk_points(gvec, s_range, u_range, v_range, point_data, cell_data): pt_idx = 0 vtk_points = vtk.vtkPoints() - suv_points = xp.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0], 3)) - xyz_points = xp.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0], 3)) - point_indices = xp.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0]), dtype=xp.int_) + suv_points = np.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0], 3)) + xyz_points = np.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0], 3)) + point_indices = np.zeros((s_range.shape[0], u_range.shape[0], v_range.shape[0]), dtype=np.int_) # Add metadata to grid. num_pts = s_range.shape[0] * u_range.shape[0] * v_range.shape[0] - point_data["s"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["u"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["v"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["x"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["y"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["z"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["theta"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["zeta"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["Point ID"] = xp.zeros(num_pts, dtype=xp.int_) - point_data["pressure"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["phi"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["chi"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["iota"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["q"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["det"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["det/(2pi)^2"] = xp.zeros(num_pts, dtype=xp.float_) - point_data["A"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["A_vec"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["A_1"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["A_2"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["B"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["B_vec"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["B_1"] = xp.zeros((num_pts, 3), dtype=xp.float_) - point_data["B_2"] = xp.zeros((num_pts, 3), dtype=xp.float_) + point_data["s"] = np.zeros(num_pts, dtype=np.float_) + point_data["u"] = np.zeros(num_pts, dtype=np.float_) + point_data["v"] = np.zeros(num_pts, dtype=np.float_) + point_data["x"] = np.zeros(num_pts, dtype=np.float_) + point_data["y"] = np.zeros(num_pts, dtype=np.float_) + point_data["z"] = np.zeros(num_pts, dtype=np.float_) + point_data["theta"] = np.zeros(num_pts, dtype=np.float_) + point_data["zeta"] = 
np.zeros(num_pts, dtype=np.float_) + point_data["Point ID"] = np.zeros(num_pts, dtype=np.int_) + point_data["pressure"] = np.zeros(num_pts, dtype=np.float_) + point_data["phi"] = np.zeros(num_pts, dtype=np.float_) + point_data["chi"] = np.zeros(num_pts, dtype=np.float_) + point_data["iota"] = np.zeros(num_pts, dtype=np.float_) + point_data["q"] = np.zeros(num_pts, dtype=np.float_) + point_data["det"] = np.zeros(num_pts, dtype=np.float_) + point_data["det/(2pi)^2"] = np.zeros(num_pts, dtype=np.float_) + point_data["A"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["A_vec"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["A_1"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["A_2"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["B"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["B_vec"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["B_1"] = np.zeros((num_pts, 3), dtype=np.float_) + point_data["B_2"] = np.zeros((num_pts, 3), dtype=np.float_) # pbar = tqdm(total=num_pts) for s_idx, s in enumerate(s_range): for u_idx, u in enumerate(u_range): for v_idx, v in enumerate(v_range): point = gvec.f(s, u, v) - suv_points[s_idx, u_idx, v_idx, :] = xp.array([s, u, v]) + suv_points[s_idx, u_idx, v_idx, :] = np.array([s, u, v]) xyz_points[s_idx, u_idx, v_idx, :] = point point_indices[s_idx, u_idx, v_idx] = pt_idx vtk_points.InsertPoint(pt_idx, point) @@ -153,10 +149,10 @@ def gen_vtk_points(gvec, s_range, u_range, v_range, point_data, cell_data): pt_idx += 1 # pbar.close() - point_data["theta"] = 2 * xp.pi * point_data["u"] - point_data["zeta"] = 2 * xp.pi * point_data["v"] + point_data["theta"] = 2 * np.pi * point_data["u"] + point_data["zeta"] = 2 * np.pi * point_data["v"] point_data["q"] = 1 / point_data["iota"] - point_data["det/(2pi)^2"] = point_data["det"] / (2 * xp.pi) ** 2 + point_data["det/(2pi)^2"] = point_data["det"] / (2 * np.pi) ** 2 return vtk_points, suv_points, xyz_points, point_indices @@ -316,4 +312,4 @@ def connect_cell(s_range, u_range, v_range, point_indices, ugrid, point_data, ce cell_data["Cell ID"].append(cell_idx) cell_idx += 1 - cell_data["Cell ID"] = xp.array(cell_data["Cell ID"], dtype=xp.int_) + cell_data["Cell ID"] = np.array(cell_data["Cell ID"], dtype=np.int_) diff --git a/src/struphy/dispersion_relations/analytic.py b/src/struphy/dispersion_relations/analytic.py index a54355428..8a87a65bb 100644 --- a/src/struphy/dispersion_relations/analytic.py +++ b/src/struphy/dispersion_relations/analytic.py @@ -1,12 +1,12 @@ "Analytic dispersion relations." 
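Aside on the VTK calls used by gen_vtk_points in the mesh_creator diff above: a minimal sketch (independent of the patch; the 2x2x2 point grid is an illustrative assumption) of filling vtkPoints and attaching a named point-data array with numpy_to_vtk:

import numpy as np
import vtkmodules.all as vtk
from vtkmodules.util.numpy_support import numpy_to_vtk as np2vtk

# eight corner points of a unit cube, shape (8, 3)
xyz = np.array(np.meshgrid([0.0, 1.0], [0.0, 1.0], [0.0, 1.0], indexing="ij")).reshape(3, -1).T

points = vtk.vtkPoints()
for pt_idx, point in enumerate(xyz):
    points.InsertPoint(pt_idx, point)  # same call pattern as in gen_vtk_points

ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(points)

# attach a scalar array, analogous to the point_data entries above
arr = np2vtk(np.linalg.norm(xyz, axis=1), deep=True)
arr.SetName("radius")
ugrid.GetPointData().AddArray(arr)

assert ugrid.GetNumberOfPoints() == 8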
-import cunumpy as xp from numpy.polynomial import Polynomial from scipy.optimize import fsolve from struphy.dispersion_relations.base import ContinuousSpectra1D, DispersionRelations1D from struphy.dispersion_relations.utilities import Zplasma from struphy.fields_background.equils import set_defaults +from struphy.utils.arrays import xp as np class Maxwell1D(DispersionRelations1D): @@ -108,18 +108,18 @@ def __call__(self, k): Bsquare = self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2 # Alfvén velocity and speed of sound - vA = xp.sqrt(Bsquare / self.params["n0"]) + vA = np.sqrt(Bsquare / self.params["n0"]) - cS = xp.sqrt(self.params["gamma"] * self.params["p0"] / self.params["n0"]) + cS = np.sqrt(self.params["gamma"] * self.params["p0"] / self.params["n0"]) # shear Alfvén branch - self._branches["shear Alfvén"] = vA * k * self.params["B0z"] / xp.sqrt(Bsquare) + self._branches["shear Alfvén"] = vA * k * self.params["B0z"] / np.sqrt(Bsquare) # slow/fast magnetosonic branch delta = (4 * self.params["B0z"] ** 2 * cS**2 * vA**2) / ((cS**2 + vA**2) ** 2 * Bsquare) - self._branches["slow magnetosonic"] = xp.sqrt(1 / 2 * k**2 * (cS**2 + vA**2) * (1 - xp.sqrt(1 - delta))) - self._branches["fast magnetosonic"] = xp.sqrt(1 / 2 * k**2 * (cS**2 + vA**2) * (1 + xp.sqrt(1 - delta))) + self._branches["slow magnetosonic"] = np.sqrt(1 / 2 * k**2 * (cS**2 + vA**2) * (1 - np.sqrt(1 - delta))) + self._branches["fast magnetosonic"] = np.sqrt(1 / 2 * k**2 * (cS**2 + vA**2) * (1 + np.sqrt(1 - delta))) return self.branches @@ -186,14 +186,14 @@ def __call__(self, k): Bsquare = self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2 - cos_theta = self.params["B0z"] / xp.sqrt(Bsquare) + cos_theta = self.params["B0z"] / np.sqrt(Bsquare) # Alfvén velocity, speed of sound and cyclotron frequency - vA = xp.sqrt(Bsquare / self.params["n0"]) + vA = np.sqrt(Bsquare / self.params["n0"]) - cS = xp.sqrt(self.params["gamma"] * self.params["p0"] / self.params["n0"]) + cS = np.sqrt(self.params["gamma"] * self.params["p0"] / self.params["n0"]) - Omega_i = xp.sqrt(Bsquare) / self.params["eps"] + Omega_i = np.sqrt(Bsquare) / self.params["eps"] # auxiliary functions def omega_0(k): @@ -218,7 +218,7 @@ def discriminant(k): ) # solve - out = xp.zeros((k.size, 4), dtype=complex) + out = np.zeros((k.size, 4), dtype=complex) for i, ki in enumerate(k): p0 = Polynomial([-(omega_0(ki) ** 2), 1.0]) p1 = Polynomial([d(ki), c(ki), b(ki), 1.0]) @@ -261,15 +261,7 @@ class FluidSlabITG(DispersionRelations1D): def __init__(self, vstar=10.0, vi=1.0, Z=1.0, kz=1.0, gamma=5 / 3): super().__init__( - "wave 1", - "wave 2", - "wave 3", - velocity_scale="thermal", - vstar=vstar, - vi=vi, - Z=Z, - kz=kz, - gamma=gamma, + "wave 1", "wave 2", "wave 3", velocity_scale="thermal", vstar=vstar, vi=vi, Z=Z, kz=kz, gamma=gamma ) def __call__(self, k): @@ -310,7 +302,7 @@ def discriminant(k): return -4.0 * p**3 - 27.0 * q(k) ** 2 # solve - out = xp.zeros((k.size, 3), dtype=complex) + out = np.zeros((k.size, 3), dtype=complex) for i, ki in enumerate(k): poly = Polynomial([q(ki), p, 0.0, 1.0]) out[i] = poly.roots() @@ -350,17 +342,17 @@ def __call__(self, kvec): # One complex array for each branch tmps = [] for n in range(self.nbranches): - tmps += [xp.zeros_like(kvec, dtype=complex)] + tmps += [np.zeros_like(kvec, dtype=complex)] ########### Model specific part ############################## # angle between k and magnetic field if self.params["B0z"] == 0: - theta = xp.pi / 2 + theta = np.pi / 2 else: - theta = 
xp.arctan(xp.sqrt(self.params["B0x"] ** 2 + self.params["B0y"] ** 2) / self.params["B0z"]) + theta = np.arctan(np.sqrt(self.params["B0x"] ** 2 + self.params["B0y"] ** 2) / self.params["B0z"]) print(theta) - cos2 = xp.cos(theta) ** 2 + cos2 = np.cos(theta) ** 2 neq = self.params["n0"] @@ -401,10 +393,10 @@ def __call__(self, kvec): e = eps6 # determinant in polynomial form - det = xp.polynomial.Polynomial([a, b, c, d, e]) + det = np.polynomial.Polynomial([a, b, c, d, e]) # solutions - sol = xp.sqrt(xp.abs(det.roots())) + sol = np.sqrt(np.abs(det.roots())) # Ion-cyclotron branch tmps[0][n] = sol[0] # Electron-cyclotron branch @@ -497,7 +489,7 @@ def __init__(self, **params): ee = 1.602176634e-19 # calculate coupling parameter alpha_c from bulk number density and mass number - self._kappa = ee * xp.sqrt(mu * self.params["Ab"] * self.params["nb"] * 1e20 / mp) + self._kappa = ee * np.sqrt(mu * self.params["Ab"] * self.params["nb"] * 1e20 / mp) def __call__(self, k, method="newton", tol=1e-10, max_it=100): """ @@ -526,7 +518,7 @@ def __call__(self, k, method="newton", tol=1e-10, max_it=100): # One complex array for each branch tmps = [] for _ in range(self.nbranches): - tmps += [xp.zeros_like(k, dtype=complex)] + tmps += [np.zeros_like(k, dtype=complex)] ########### Model specific part ############################## @@ -540,8 +532,8 @@ def __call__(self, k, method="newton", tol=1e-10, max_it=100): wR = [self.params["B0"] * ki, 0.0] wL = [self.params["B0"] * ki, 0.0] else: - wR = [xp.real(tmps[0][i - 1]), xp.imag(tmps[0][i - 1])] - wL = [xp.real(tmps[1][i - 1]), xp.imag(tmps[1][i - 1])] + wR = [np.real(tmps[0][i - 1]), np.imag(tmps[0][i - 1])] + wL = [np.real(tmps[1][i - 1]), np.imag(tmps[1][i - 1])] # apply solver if method == "newton": @@ -550,13 +542,13 @@ def __call__(self, k, method="newton", tol=1e-10, max_it=100): Dr, Di = self.D_RL(wR, ki, +1) - while xp.abs(Dr + Di * 1j) > tol or counter == max_it: + while np.abs(Dr + Di * 1j) > tol or counter == max_it: # derivative Drp, Dip = self.D_RL(wR, ki, +1, 1) # update - wR[0] = wR[0] - xp.real((Dr + Di * 1j) / (Drp + Dip * 1j)) - wR[1] = wR[1] - xp.imag((Dr + Di * 1j) / (Drp + Dip * 1j)) + wR[0] = wR[0] - np.real((Dr + Di * 1j) / (Drp + Dip * 1j)) + wR[1] = wR[1] - np.imag((Dr + Di * 1j) / (Drp + Dip * 1j)) Dr, Di = self.D_RL(wR, ki, +1) counter += 1 @@ -566,13 +558,13 @@ def __call__(self, k, method="newton", tol=1e-10, max_it=100): Dr, Di = self.D_RL(wL, ki, -1) - while xp.abs(Dr + Di * 1j) > tol or counter == max_it: + while np.abs(Dr + Di * 1j) > tol or counter == max_it: # derivative Drp, Dip = self.D_RL(wL, ki, -1, 1) # update - wL[0] = wL[0] - xp.real((Dr + Di * 1j) / (Drp + Dip * 1j)) - wL[1] = wL[1] - xp.imag((Dr + Di * 1j) / (Drp + Dip * 1j)) + wL[0] = wL[0] - np.real((Dr + Di * 1j) / (Drp + Dip * 1j)) + wL[1] = wL[1] - np.imag((Dr + Di * 1j) / (Drp + Dip * 1j)) Dr, Di = self.D_RL(wL, ki, -1) counter += 1 @@ -659,7 +651,7 @@ def D_RL(self, w, k, pol, der=0): * (Zplasma(xi, 0) + (w - k * v0) * Zplasma(xi, 1) * xip) ) - return xp.real(out), xp.imag(out) + return np.real(out), np.imag(out) class PressureCouplingFull6DParallel(DispersionRelations1D): @@ -731,7 +723,7 @@ def __call__(self, k, tol=1e-10): # One complex array for each branch tmps = [] for n in range(self.nbranches): - tmps += [xp.zeros_like(k, dtype=complex)] + tmps += [np.zeros_like(k, dtype=complex)] ########### Model specific part ############################## @@ -743,9 +735,9 @@ def __call__(self, k, tol=1e-10): wL = [1 * ki, 0.0] # TODO: use vA wS = [1 * ki, 0.0] # 
TODO: use cS else: - wR = [xp.real(tmps[0][i - 1]), xp.imag(tmps[0][i - 1])] - wL = [xp.real(tmps[1][i - 1]), xp.imag(tmps[1][i - 1])] - wS = [xp.real(tmps[2][i - 1]), xp.imag(tmps[2][i - 1])] + wR = [np.real(tmps[0][i - 1]), np.imag(tmps[0][i - 1])] + wL = [np.real(tmps[1][i - 1]), np.imag(tmps[1][i - 1])] + wS = [np.real(tmps[2][i - 1]), np.imag(tmps[2][i - 1])] # R/L shear Alfvén wave sol_R = fsolve(self.D_RL, x0=wR, args=(ki, +1), xtol=tol) @@ -804,8 +796,8 @@ def D_RL(self, w, k, pol): vperp = 1.0 # TODO vth = 1.0 - vA = xp.sqrt((self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2) / self.params["n0"]) - # cS = xp.sqrt(self.params['beta']*vA) + vA = np.sqrt((self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2) / self.params["n0"]) + # cS = np.sqrt(self.params['beta']*vA) cS = 1.0 a0 = u0 / vpara # TODO @@ -848,7 +840,7 @@ def D_RL(self, w, k, pol): ) ) - return xp.real(c1), xp.imag(c1) + return np.real(c1), np.imag(c1) def D_sonic(self, w, k): r""" @@ -881,8 +873,8 @@ def D_sonic(self, w, k): vperp = 1.0 # TODO vth = 1.0 - vA = xp.sqrt((self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2) / self.params["n0"]) - # cS = xp.sqrt(self.params['beta']*vA) + vA = np.sqrt((self.params["B0x"] ** 2 + self.params["B0y"] ** 2 + self.params["B0z"] ** 2) / self.params["n0"]) + # cS = np.sqrt(self.params['beta']*vA) cS = 1.0 a0 = u0 / vpara # TODO @@ -893,7 +885,7 @@ def D_sonic(self, w, k): c1 = w**2 - k**2 * cS**2 + 2 * w * k * nu * vpara * x4 - return xp.real(c1), xp.imag(c1) + return np.real(c1), np.imag(c1) # private methods: # ---------------- @@ -1022,11 +1014,11 @@ def __call__(self, x, m, n): specs = {} # shear Alfvén continuum - specs["shear_Alfvén"] = xp.sqrt(F(x, m, n) ** 2 / rho(x)) + specs["shear_Alfvén"] = np.sqrt(F(x, m, n) ** 2 / rho(x)) # slow sound continuum - specs["slow_sound"] = xp.sqrt( - gamma * p(x) * F(x, m, n) ** 2 / (rho(x) * (gamma * p(x) + By(x) ** 2 + Bz(x) ** 2)), + specs["slow_sound"] = np.sqrt( + gamma * p(x) * F(x, m, n) ** 2 / (rho(x) * (gamma * p(x) + By(x) ** 2 + Bz(x) ** 2)) ) return specs @@ -1129,11 +1121,11 @@ def __call__(self, r, m, n): specs = {} # shear Alfvén continuum - specs["shear_Alfvén"] = xp.sqrt(F(r, m, n) ** 2 / rho(r)) + specs["shear_Alfvén"] = np.sqrt(F(r, m, n) ** 2 / rho(r)) # slow sound continuum - specs["slow_sound"] = xp.sqrt( - gamma * p(r) * F(r, m, n) ** 2 / (rho(r) * (gamma * p(r) + Bt(r) ** 2 + Bz(r) ** 2)), + specs["slow_sound"] = np.sqrt( + gamma * p(r) * F(r, m, n) ** 2 / (rho(r) * (gamma * p(r) + Bt(r) ** 2 + Bz(r) ** 2)) ) return specs diff --git a/src/struphy/dispersion_relations/base.py b/src/struphy/dispersion_relations/base.py index 6994ae2fb..31a237a90 100644 --- a/src/struphy/dispersion_relations/base.py +++ b/src/struphy/dispersion_relations/base.py @@ -2,9 +2,10 @@ from abc import ABCMeta, abstractmethod -import cunumpy as xp from matplotlib import pyplot as plt +from struphy.utils.arrays import xp as np + class DispersionRelations1D(metaclass=ABCMeta): r""" @@ -99,18 +100,18 @@ def plot(self, k): plt.ylabel(rf"Im($\omega$) [{unit_om}]") for name, omega in self.branches.items(): plt.subplot(2, 1, 1) - plt.plot(k, xp.real(omega), label=name) + plt.plot(k, np.real(omega), label=name) plt.subplot(2, 1, 2) - plt.plot(k, xp.imag(omega), label=name) + plt.plot(k, np.imag(omega), label=name) plt.subplot(2, 1, 1) for lab, kc in self.k_crit.items(): - if kc > xp.min(k) and kc < xp.max(k): + if kc > np.min(k) and kc < np.max(k): plt.axvline(kc, color="k", 
linestyle="--", linewidth=0.5, label=lab) plt.legend() plt.subplot(2, 1, 2) for lab, kc in self.k_crit.items(): - if kc > xp.min(k) and kc < xp.max(k): + if kc > np.min(k) and kc < np.max(k): plt.axvline(kc, color="k", linestyle="--", linewidth=0.5, label=lab) diff --git a/src/struphy/dispersion_relations/utilities.py b/src/struphy/dispersion_relations/utilities.py index b796eb321..b0f74fbb4 100644 --- a/src/struphy/dispersion_relations/utilities.py +++ b/src/struphy/dispersion_relations/utilities.py @@ -1,6 +1,7 @@ -import cunumpy as xp from scipy.special import erfi +from struphy.utils.arrays import xp as np + def Zplasma(xi, der=0): """ @@ -23,7 +24,7 @@ def Zplasma(xi, der=0): assert der == 0 or der == 1, 'Parameter "der" must be either 0 or 1' if der == 0: - z = xp.sqrt(xp.pi) * xp.exp(-(xi**2)) * (1j - erfi(xi)) + z = np.sqrt(np.pi) * np.exp(-(xi**2)) * (1j - erfi(xi)) else: z = -2 * (1 + xi * Zplasma(xi, 0)) diff --git a/src/struphy/eigenvalue_solvers/derivatives.py b/src/struphy/eigenvalue_solvers/derivatives.py index 0e34cceb1..45d8e2d94 100644 --- a/src/struphy/eigenvalue_solvers/derivatives.py +++ b/src/struphy/eigenvalue_solvers/derivatives.py @@ -6,9 +6,10 @@ Modules to assemble discrete derivatives. """ -import cunumpy as xp import scipy.sparse as spa +from struphy.utils.arrays import xp as np + # ================== 1d incident matrix ======================= def grad_1d_matrix(spl_kind, NbaseN): @@ -31,7 +32,7 @@ def grad_1d_matrix(spl_kind, NbaseN): NbaseD = NbaseN - 1 + spl_kind - grad = xp.zeros((NbaseD, NbaseN), dtype=float) + grad = np.zeros((NbaseD, NbaseN), dtype=float) for i in range(NbaseD): grad[i, i] = -1.0 @@ -79,9 +80,9 @@ def discrete_derivatives_3d(space): grad_1d_3 = 0 * spa.identity(1, format="csr") else: if space.basis_tor == "r": - grad_1d_3 = 2 * xp.pi * space.n_tor * spa.csr_matrix(xp.array([[0.0, 1.0], [-1.0, 0.0]])) + grad_1d_3 = 2 * np.pi * space.n_tor * spa.csr_matrix(np.array([[0.0, 1.0], [-1.0, 0.0]])) else: - grad_1d_3 = 1j * 2 * xp.pi * space.n_tor * spa.identity(1, format="csr") + grad_1d_3 = 1j * 2 * np.pi * space.n_tor * spa.identity(1, format="csr") # standard tensor-product derivatives if space.ck == -1: diff --git a/src/struphy/eigenvalue_solvers/kernels_projectors_global.py b/src/struphy/eigenvalue_solvers/kernels_projectors_global.py index f01cefc1c..4f6c01392 100644 --- a/src/struphy/eigenvalue_solvers/kernels_projectors_global.py +++ b/src/struphy/eigenvalue_solvers/kernels_projectors_global.py @@ -24,13 +24,7 @@ def kernel_int_2d(nq1: "int", nq2: "int", w1: "float[:]", w2: "float[:]", mat_f: # ========= kernel for integration in 3d ================== def kernel_int_3d( - nq1: "int", - nq2: "int", - nq3: "int", - w1: "float[:]", - w2: "float[:]", - w3: "float[:]", - mat_f: "float[:,:,:]", + nq1: "int", nq2: "int", nq3: "int", w1: "float[:]", w2: "float[:]", w3: "float[:]", mat_f: "float[:,:,:]" ) -> "float": f_loc = 0.0 @@ -53,11 +47,7 @@ def kernel_int_3d( # ========= kernel for integration along eta1 direction, reducing to a 2d array ============================ def kernel_int_2d_eta1( - subs1: "int[:]", - subs_cum1: "int[:]", - w1: "float[:,:]", - mat_f: "float[:,:,:]", - f_int: "float[:,:]", + subs1: "int[:]", subs_cum1: "int[:]", w1: "float[:,:]", mat_f: "float[:,:,:]", f_int: "float[:,:]" ): n1, n2 = shape(f_int) @@ -76,11 +66,7 @@ def kernel_int_2d_eta1( # ========= kernel for integration along eta2 direction, reducing to a 2d array ============================ def kernel_int_2d_eta2( - subs2: "int[:]", - subs_cum2: "int[:]", - 
w2: "float[:,:]", - mat_f: "float[:,:,:]", - f_int: "float[:,:]", + subs2: "int[:]", subs_cum2: "int[:]", w2: "float[:,:]", mat_f: "float[:,:,:]", f_int: "float[:,:]" ): n1, n2 = shape(f_int) @@ -181,11 +167,7 @@ def kernel_int_2d_eta1_eta2_old(w1: "float[:,:]", w2: "float[:,:]", mat_f: "floa # ========= kernel for integration along eta1 direction, reducing to a 3d array ============================ def kernel_int_3d_eta1( - subs1: "int[:]", - subs_cum1: "int[:]", - w1: "float[:,:]", - mat_f: "float[:,:,:,:]", - f_int: "float[:,:,:]", + subs1: "int[:]", subs_cum1: "int[:]", w1: "float[:,:]", mat_f: "float[:,:,:,:]", f_int: "float[:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -204,11 +186,7 @@ def kernel_int_3d_eta1( def kernel_int_3d_eta1_transpose( - subs1: "int[:]", - subs_cum1: "int[:]", - w1: "float[:,:]", - f_int: "float[:,:,:]", - mat_f: "float[:,:,:,:]", + subs1: "int[:]", subs_cum1: "int[:]", w1: "float[:,:]", f_int: "float[:,:,:]", mat_f: "float[:,:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -227,11 +205,7 @@ def kernel_int_3d_eta1_transpose( # ========= kernel for integration along eta2 direction, reducing to a 3d array ============================ def kernel_int_3d_eta2( - subs2: "int[:]", - subs_cum2: "int[:]", - w2: "float[:,:]", - mat_f: "float[:,:,:,:]", - f_int: "float[:,:,:]", + subs2: "int[:]", subs_cum2: "int[:]", w2: "float[:,:]", mat_f: "float[:,:,:,:]", f_int: "float[:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -250,11 +224,7 @@ def kernel_int_3d_eta2( def kernel_int_3d_eta2_transpose( - subs2: "int[:]", - subs_cum2: "int[:]", - w2: "float[:,:]", - f_int: "float[:,:,:]", - mat_f: "float[:,:,:,:]", + subs2: "int[:]", subs_cum2: "int[:]", w2: "float[:,:]", f_int: "float[:,:,:]", mat_f: "float[:,:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -273,11 +243,7 @@ def kernel_int_3d_eta2_transpose( # ========= kernel for integration along eta3 direction, reducing to a 3d array ============================ def kernel_int_3d_eta3( - subs3: "int[:]", - subs_cum3: "int[:]", - w3: "float[:,:]", - mat_f: "float[:,:,:,:]", - f_int: "float[:,:,:]", + subs3: "int[:]", subs_cum3: "int[:]", w3: "float[:,:]", mat_f: "float[:,:,:,:]", f_int: "float[:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -296,11 +262,7 @@ def kernel_int_3d_eta3( def kernel_int_3d_eta3_transpose( - subs3: "int[:]", - subs_cum3: "int[:]", - w3: "float[:,:]", - f_int: "float[:,:,:]", - mat_f: "float[:,:,:,:]", + subs3: "int[:]", subs_cum3: "int[:]", w3: "float[:,:]", f_int: "float[:,:,:]", mat_f: "float[:,:,:,:]" ): n1, n2, n3 = shape(f_int) @@ -693,11 +655,7 @@ def kernel_int_3d_eta1_eta2_eta3_transpose( # ========= kernel for integration in eta1-eta2-eta3 cell, reducing to a 3d array ======================= def kernel_int_3d_eta1_eta2_eta3_old( - w1: "float[:,:]", - w2: "float[:,:]", - w3: "float[:,:]", - mat_f: "float[:,:,:,:,:,:]", - f_int: "float[:,:,:]", + w1: "float[:,:]", w2: "float[:,:]", w3: "float[:,:]", mat_f: "float[:,:,:,:,:,:]", f_int: "float[:,:,:]" ): ne1, nq1, ne2, nq2, ne3, nq3 = shape(mat_f) diff --git a/src/struphy/eigenvalue_solvers/legacy/MHD_eigenvalues_cylinder_1D.py b/src/struphy/eigenvalue_solvers/legacy/MHD_eigenvalues_cylinder_1D.py index a16b44e3d..e60d565fb 100644 --- a/src/struphy/eigenvalue_solvers/legacy/MHD_eigenvalues_cylinder_1D.py +++ b/src/struphy/eigenvalue_solvers/legacy/MHD_eigenvalues_cylinder_1D.py @@ -1,4 +1,3 @@ -import cunumpy as xp import scipy as sc import scipy.sparse as spa import scipy.special as sp @@ -10,6 +9,7 @@ import struphy.eigenvalue_solvers.mass_matrices_1d as mass import 
struphy.eigenvalue_solvers.projectors_global as pro import struphy.eigenvalue_solvers.spline_space as spl +from struphy.utils.arrays import xp as np # numerical solution of the general ideal MHD eigenvalue problem in a cylinder using 1d B-splines in radial direction @@ -21,7 +21,7 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ r = lambda eta: a * eta # jacobian for integration - jac = lambda eta1: a * xp.ones(eta1.shape, dtype=float) + jac = lambda eta1: a * np.ones(eta1.shape, dtype=float) # ========================== kinetic energy functional ============================== # integrands (multiplied by -2/omega**2) @@ -46,11 +46,11 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ # Bspline_A = Bsp.Bspline(splines.T, splines.p ) # Bspline_B = Bsp.Bspline(splines.t, splines.p - 1) # - # K_11_scipy = xp.zeros((splines.NbaseN, splines.NbaseN), dtype=float) - # K_22_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) - # K_33_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) - # K_23_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) - # K_32_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # K_11_scipy = np.zeros((splines.NbaseN, splines.NbaseN), dtype=float) + # K_22_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # K_33_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # K_23_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # K_32_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) # # for i in range(1, Bspline_A.N - 1): # for j in range(1, Bspline_A.N - 1): @@ -76,11 +76,11 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ # integrand = lambda eta : a*K_ZV(eta)*Bspline_B(eta, i)*Bspline_B(eta, j) # K_32_scipy[i, j] = integrate.quad(integrand, 0., 1.)[0] - # assert xp.allclose(K_11.toarray(), K_11_scipy[1:-1, 1:-1]) - # assert xp.allclose(K_22.toarray(), K_22_scipy ) - # assert xp.allclose(K_33.toarray(), K_33_scipy[bcZ:, bcZ:]) - # assert xp.allclose(K_23.toarray(), K_23_scipy[ : , bcZ:]) - # assert xp.allclose(K_32.toarray(), K_32_scipy[bcZ:, :]) + # assert np.allclose(K_11.toarray(), K_11_scipy[1:-1, 1:-1]) + # assert np.allclose(K_22.toarray(), K_22_scipy ) + # assert np.allclose(K_33.toarray(), K_33_scipy[bcZ:, bcZ:]) + # assert np.allclose(K_23.toarray(), K_23_scipy[ : , bcZ:]) + # assert np.allclose(K_32.toarray(), K_32_scipy[bcZ:, :]) # ========================== potential energy functional =========================== # integrands (multiplied by 2) @@ -120,17 +120,17 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ ) W_dXZ = lambda eta: B_phi(r(eta)) * gamma * m * p(r(eta)) / r(eta) ** 2 + B_z(r(eta)) * gamma * k * p(r(eta)) / r( - eta, + eta ) W_ZdX = lambda eta: B_phi(r(eta)) * gamma * m * p(r(eta)) / r(eta) ** 2 + B_z(r(eta)) * gamma * k * p(r(eta)) / r( - eta, + eta ) W_VZ = lambda eta: B_phi(r(eta)) * gamma * m**2 * p(r(eta)) / r(eta) ** 2 + B_z(r(eta)) * gamma * k * m * p( - r(eta), + r(eta) ) / r(eta) W_ZV = lambda eta: B_phi(r(eta)) * gamma * m**2 * p(r(eta)) / r(eta) ** 2 + B_z(r(eta)) * gamma * k * m * p( - r(eta), + r(eta) ) / r(eta) # compute matrices @@ -163,15 +163,15 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ # return W_22 ## test correct computation - # W_11_scipy = xp.zeros((splines.NbaseN, splines.NbaseN), dtype=float) - # W_22_scipy = xp.zeros((splines.NbaseD, 
splines.NbaseD), dtype=float) - # W_33_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) - # W_12_scipy = xp.zeros((splines.NbaseN, splines.NbaseD), dtype=float) - # W_21_scipy = xp.zeros((splines.NbaseD, splines.NbaseN), dtype=float) - # W_13_scipy = xp.zeros((splines.NbaseN, splines.NbaseD), dtype=float) - # W_31_scipy = xp.zeros((splines.NbaseD, splines.NbaseN), dtype=float) - # W_23_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) - # W_32_scipy = xp.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # W_11_scipy = np.zeros((splines.NbaseN, splines.NbaseN), dtype=float) + # W_22_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # W_33_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # W_12_scipy = np.zeros((splines.NbaseN, splines.NbaseD), dtype=float) + # W_21_scipy = np.zeros((splines.NbaseD, splines.NbaseN), dtype=float) + # W_13_scipy = np.zeros((splines.NbaseN, splines.NbaseD), dtype=float) + # W_31_scipy = np.zeros((splines.NbaseD, splines.NbaseN), dtype=float) + # W_23_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) + # W_32_scipy = np.zeros((splines.NbaseD, splines.NbaseD), dtype=float) # # for i in range(1, Bspline_A.N - 1): # for j in range(1, Bspline_A.N - 1): @@ -187,15 +187,15 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ # integrand = lambda eta : W_XdX(eta) * Bspline_A(eta, i, 0) * Bspline_A(eta, j, 1) # W_11_scipy[i, j] += integrate.quad(integrand, 0., 1.)[0] # - # assert xp.allclose(W_11.toarray(), W_11_scipy[1:-1, 1:-1]) + # assert np.allclose(W_11.toarray(), W_11_scipy[1:-1, 1:-1]) - # print(xp.allclose(K, K.T)) - # print(xp.allclose(W, W.T)) + # print(np.allclose(K, K.T)) + # print(np.allclose(W, W.T)) # solve eigenvalue problem omega**2*K*xi = W*xi - A = xp.linalg.inv(K).dot(W) + A = np.linalg.inv(K).dot(W) - omega2, XVZ_eig = xp.linalg.eig(A) + omega2, XVZ_eig = np.linalg.eig(A) # extract components X_eig = XVZ_eig[: (splines.NbaseN - 2), :] @@ -203,11 +203,11 @@ def solve_ev_problem(rho, B_phi, dB_phi, B_z, p, gamma, a, k, m, num_params, bcZ Z_eig = XVZ_eig[(splines.NbaseN - 2 + splines.NbaseD) :, :] # add boundary conditions X(0) = X(1) = 0 - X_eig = xp.vstack((xp.zeros(X_eig.shape[1], dtype=float), X_eig, xp.zeros(X_eig.shape[1], dtype=float))) + X_eig = np.vstack((np.zeros(X_eig.shape[1], dtype=float), X_eig, np.zeros(X_eig.shape[1], dtype=float))) # add boundary condition Z(0) = 0 if bcZ == 1: - Z_eig = xp.vstack((xp.zeros(Z_eig.shape[1], dtype=float), Z_eig)) + Z_eig = np.vstack((np.zeros(Z_eig.shape[1], dtype=float), Z_eig)) return omega2, X_eig, V_eig, Z_eig @@ -225,43 +225,43 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, # components of metric tensor and Jacobian determinant G_r = a**2 - G_phi = lambda eta: 4 * xp.pi**2 * r(eta) ** 2 - G_z = 4 * xp.pi**2 * R0**2 - J = lambda eta: 4 * xp.pi**2 * R0 * a * r(eta) + G_phi = lambda eta: 4 * np.pi**2 * r(eta) ** 2 + G_z = 4 * np.pi**2 * R0**2 + J = lambda eta: 4 * np.pi**2 * R0 * a * r(eta) # 2-from components of equilibrium magnetic field and its projection - B2_phi = lambda eta: 2 * xp.pi * R0 * a * B_phi(r(eta)) - B2_z = lambda eta: 2 * xp.pi * a * r(eta) * B_z(r(eta)) + B2_phi = lambda eta: 2 * np.pi * R0 * a * B_phi(r(eta)) + B2_z = lambda eta: 2 * np.pi * a * r(eta) * B_z(r(eta)) - b2_eq_phi = xp.linalg.solve(proj.D.toarray(), proj.rhs_1(B2_phi)) - b2_eq_z = xp.append(xp.array([0.0]), xp.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(B2_z)[1:])) + 
b2_eq_phi = np.linalg.solve(proj.D.toarray(), proj.rhs_1(B2_phi))
+    b2_eq_z = np.append(np.array([0.0]), np.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(B2_z)[1:]))
 
     # 3-form components of equilibrium density and pressure and its projection
     Rho3 = lambda eta: J(eta) * Rho(r(eta))
     P3 = lambda eta: J(eta) * P(r(eta))
 
-    rho3_eq = xp.append(xp.array([0.0]), xp.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(Rho3)[1:]))
-    p3_eq = xp.append(xp.array([0.0]), xp.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(P3)[1:]))
+    rho3_eq = np.append(np.array([0.0]), np.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(Rho3)[1:]))
+    p3_eq = np.append(np.array([0.0]), np.linalg.solve(proj.D.toarray()[1:, 1:], proj.rhs_1(P3)[1:]))
 
     # 2-form components of initial velocity and its projection
     U2_r = lambda eta: J(eta) * eta * (1 - eta)
 
     u2_r = proj.pi_0(U2_r)
-    u2_phi = -1 / (2 * xp.pi * m) * GRAD_all.dot(u2_r)
-    u2_z = xp.zeros(len(u2_phi), dtype=float)
+    u2_phi = -1 / (2 * np.pi * m) * GRAD_all.dot(u2_r)
+    u2_z = np.zeros(len(u2_phi), dtype=float)
 
-    b2_r = xp.zeros(len(u2_r), dtype=float)
-    b2_phi = xp.zeros(len(u2_phi), dtype=float)
-    b2_z = xp.zeros(len(u2_z), dtype=float)
+    b2_r = np.zeros(len(u2_r), dtype=float)
+    b2_phi = np.zeros(len(u2_phi), dtype=float)
+    b2_z = np.zeros(len(u2_z), dtype=float)
 
-    p3 = xp.zeros(len(u2_z), dtype=float)
+    p3 = np.zeros(len(u2_z), dtype=float)
 
     # projection matrices
     pi0_N_i, pi0_D_i, pi1_N_i, pi1_D_i = proj.projection_matrices_1d_reduced()
     pi0_NN_i, pi0_DN_i, pi0_ND_i, pi0_DD_i, pi1_NN_i, pi1_DN_i, pi1_ND_i, pi1_DD_i = proj.projection_matrices_1d()
 
     # 1D collocation matrices for interpolation in format (point, global basis function)
-    x_int = xp.copy(proj.x_int)
+    x_int = np.copy(proj.x_int)
 
     kind_splines = [False, True]
@@ -270,35 +264,29 @@
     # 1D integration sub-intervals, quadrature points and weights
     if splines.p % 2 == 0:
-        x_his = xp.union1d(x_int, splines.el_b)
+        x_his = np.union1d(x_int, splines.el_b)
     else:
-        x_his = xp.copy(x_int)
+        x_his = np.copy(x_int)
 
     pts, wts = bsp.quadrature_grid(x_his, proj.pts_loc, proj.wts_loc)
 
     # compute number of sub-intervals for integrations (even degree)
     if splines.p % 2 == 0:
-        subs = 2 * xp.ones(proj.pts.shape[0], dtype=int)
+        subs = 2 * np.ones(proj.pts.shape[0], dtype=int)
         subs[: splines.p // 2] = 1
         subs[-splines.p // 2 :] = 1
 
     # compute number of sub-intervals for integrations (odd degree)
     else:
-        subs = xp.ones(proj.pts.shape[0], dtype=int)
+        subs = np.ones(proj.pts.shape[0], dtype=int)
 
     # evaluate basis functions on quadrature points in format (interval, local quad.
point, global basis function) basis_his_N = bsp.collocation_matrix(splines.T, splines.p, pts.flatten(), False, normalize=kind_splines[0]).reshape( - pts.shape[0], - pts.shape[1], - splines.NbaseN, + pts.shape[0], pts.shape[1], splines.NbaseN ) basis_his_D = bsp.collocation_matrix( - splines.t, - splines.p - 1, - pts.flatten(), - False, - normalize=kind_splines[1], + splines.t, splines.p - 1, pts.flatten(), False, normalize=kind_splines[1] ).reshape(pts.shape[0], pts.shape[1], splines.NbaseD) # shift first interpolation point away from pole @@ -314,26 +308,26 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, M2_z = mass.get_M1(splines, mapping=lambda eta: J(eta) / G_z).toarray() # === matrices for curl of equilibrium field (with integration by parts) ========== - MB_12_eq = xp.empty((splines.NbaseN, splines.NbaseD), dtype=float) - MB_13_eq = xp.empty((splines.NbaseN, splines.NbaseD), dtype=float) + MB_12_eq = np.empty((splines.NbaseN, splines.NbaseD), dtype=float) + MB_13_eq = np.empty((splines.NbaseN, splines.NbaseD), dtype=float) - MB_21_eq = xp.empty((splines.NbaseD, splines.NbaseN), dtype=float) - MB_31_eq = xp.empty((splines.NbaseD, splines.NbaseN), dtype=float) + MB_21_eq = np.empty((splines.NbaseD, splines.NbaseN), dtype=float) + MB_31_eq = np.empty((splines.NbaseD, splines.NbaseN), dtype=float) - f_phi = xp.linalg.inv(proj.N.toarray()).T.dot(GRAD_all.T.dot(M2_phi.dot(b2_eq_phi))) - f_z = xp.linalg.inv(proj.N.toarray()).T.dot(GRAD_all.T.dot(M2_z.dot(b2_eq_z))) + f_phi = np.linalg.inv(proj.N.toarray()).T.dot(GRAD_all.T.dot(M2_phi.dot(b2_eq_phi))) + f_z = np.linalg.inv(proj.N.toarray()).T.dot(GRAD_all.T.dot(M2_z.dot(b2_eq_z))) - pi0_ND_phi = xp.empty(pi0_ND_i[3].max() + 1, dtype=float) - pi0_ND_z = xp.empty(pi0_ND_i[3].max() + 1, dtype=float) + pi0_ND_phi = np.empty(pi0_ND_i[3].max() + 1, dtype=float) + pi0_ND_z = np.empty(pi0_ND_i[3].max() + 1, dtype=float) - row_ND = xp.empty(pi0_ND_i[3].max() + 1, dtype=int) - col_ND = xp.empty(pi0_ND_i[3].max() + 1, dtype=int) + row_ND = np.empty(pi0_ND_i[3].max() + 1, dtype=int) + col_ND = np.empty(pi0_ND_i[3].max() + 1, dtype=int) - pi0_DN_phi = xp.empty(pi0_DN_i[3].max() + 1, dtype=float) - pi0_DN_z = xp.empty(pi0_DN_i[3].max() + 1, dtype=float) + pi0_DN_phi = np.empty(pi0_DN_i[3].max() + 1, dtype=float) + pi0_DN_z = np.empty(pi0_DN_i[3].max() + 1, dtype=float) - row_DN = xp.empty(pi0_DN_i[3].max() + 1, dtype=int) - col_DN = xp.empty(pi0_DN_i[3].max() + 1, dtype=int) + row_DN = np.empty(pi0_DN_i[3].max() + 1, dtype=int) + col_DN = np.empty(pi0_DN_i[3].max() + 1, dtype=int) ker.rhs0_f_1d(pi0_ND_i, basis_int_N, basis_int_D, 1 / J(x_int), f_phi, pi0_ND_phi, row_ND, col_ND) ker.rhs0_f_1d(pi0_ND_i, basis_int_N, basis_int_D, 1 / J(x_int), f_z, pi0_ND_z, row_ND, col_ND) @@ -354,23 +348,23 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, MB_31_eq[:, :] = -pi0_DN_z # === matrices for curl of equilibrium field (without integration by parts) ====== - MB_12_eq = xp.empty((splines.NbaseN, splines.NbaseD), dtype=float) - MB_13_eq = xp.empty((splines.NbaseN, splines.NbaseD), dtype=float) + MB_12_eq = np.empty((splines.NbaseN, splines.NbaseD), dtype=float) + MB_13_eq = np.empty((splines.NbaseN, splines.NbaseD), dtype=float) - MB_21_eq = xp.empty((splines.NbaseD, splines.NbaseN), dtype=float) - MB_31_eq = xp.empty((splines.NbaseD, splines.NbaseN), dtype=float) + MB_21_eq = np.empty((splines.NbaseD, splines.NbaseN), dtype=float) + MB_31_eq = np.empty((splines.NbaseD, splines.NbaseN), dtype=float) 
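A side note on the integration grid set up in the hunk above: for even spline degree the interpolation points x_int lie in the interior of the elements, so x_his = np.union1d(x_int, splines.el_b) merges the element boundaries into the grid, and the interior histopolation cells are then integrated over two sub-intervals each (the subs array). A minimal numpy-only sketch with made-up points, not struphy output:

import numpy as np

el_b = np.linspace(0.0, 1.0, 5)  # hypothetical element boundaries (4 elements)
x_int = 0.5 * (el_b[:-1] + el_b[1:])  # even degree: interpolation points at element midpoints
x_his = np.union1d(x_int, el_b)  # merged grid: every quadrature cell lies in a single element
assert x_his.size == x_int.size + el_b.size  # midpoints and boundaries never coincide
# e.g. the cell [x_int[0], x_int[1]] contains el_b[1], so it is split into two sub-intervals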
- cN = xp.empty(splines.NbaseN, dtype=float) - cD = xp.empty(splines.NbaseD, dtype=float) + cN = np.empty(splines.NbaseN, dtype=float) + cD = np.empty(splines.NbaseD, dtype=float) for j in range(splines.NbaseD): cD[:] = 0.0 cD[j] = 1.0 integrand2 = ( - lambda eta: splines.evaluate_D(eta, cD) / J(eta) * 2 * xp.pi * a * (B_phi(r(eta)) + r(eta) * dB_phi(r(eta))) + lambda eta: splines.evaluate_D(eta, cD) / J(eta) * 2 * np.pi * a * (B_phi(r(eta)) + r(eta) * dB_phi(r(eta))) ) - integrand3 = lambda eta: splines.evaluate_D(eta, cD) / J(eta) * 2 * xp.pi * a * R0 * dB_z(r(eta)) + integrand3 = lambda eta: splines.evaluate_D(eta, cD) / J(eta) * 2 * np.pi * a * R0 * dB_z(r(eta)) MB_12_eq[:, j] = inner.inner_prod_V0(splines, integrand2) MB_13_eq[:, j] = inner.inner_prod_V0(splines, integrand3) @@ -380,39 +374,39 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, cN[j] = 1.0 integrand2 = ( - lambda eta: splines.evaluate_N(eta, cN) / J(eta) * 2 * xp.pi * a * (B_phi(r(eta)) + r(eta) * dB_phi(r(eta))) + lambda eta: splines.evaluate_N(eta, cN) / J(eta) * 2 * np.pi * a * (B_phi(r(eta)) + r(eta) * dB_phi(r(eta))) ) - integrand3 = lambda eta: splines.evaluate_N(eta, cN) / J(eta) * 2 * xp.pi * a * R0 * dB_z(r(eta)) + integrand3 = lambda eta: splines.evaluate_N(eta, cN) / J(eta) * 2 * np.pi * a * R0 * dB_z(r(eta)) MB_21_eq[:, j] = inner.inner_prod_V1(splines, integrand2) MB_31_eq[:, j] = inner.inner_prod_V1(splines, integrand3) # ===== right-hand sides of projection matrices =============== - rhs0_N_phi = xp.empty(pi0_N_i[0].size, dtype=float) - rhs0_N_z = xp.empty(pi0_N_i[0].size, dtype=float) + rhs0_N_phi = np.empty(pi0_N_i[0].size, dtype=float) + rhs0_N_z = np.empty(pi0_N_i[0].size, dtype=float) - rhs1_D_phi = xp.empty(pi1_D_i[0].size, dtype=float) - rhs1_D_z = xp.empty(pi1_D_i[0].size, dtype=float) + rhs1_D_phi = np.empty(pi1_D_i[0].size, dtype=float) + rhs1_D_z = np.empty(pi1_D_i[0].size, dtype=float) - rhs0_N_pr = xp.empty(pi0_N_i[0].size, dtype=float) - rhs1_D_pr = xp.empty(pi1_D_i[0].size, dtype=float) + rhs0_N_pr = np.empty(pi0_N_i[0].size, dtype=float) + rhs1_D_pr = np.empty(pi1_D_i[0].size, dtype=float) - rhs0_N_rho = xp.empty(pi0_N_i[0].size, dtype=float) - rhs1_D_rho = xp.empty(pi1_D_i[0].size, dtype=float) + rhs0_N_rho = np.empty(pi0_N_i[0].size, dtype=float) + rhs1_D_rho = np.empty(pi1_D_i[0].size, dtype=float) # ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, splines.evaluate_D(x_int, b2_eq_phi)/J(x_int), rhs0_N_phi) # ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, splines.evaluate_D(x_int, b2_eq_z )/J(x_int), rhs0_N_z ) # - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), b2_eq_z )/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_z) - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), b2_eq_phi)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_phi) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), b2_eq_z )/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_z) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), b2_eq_phi)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_phi) # # ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, splines.evaluate_D(x_int, p3_eq)/J(x_int), 
rhs0_N_pr) - # temp = xp.empty(pi0_N_i[0].size, dtype=float) + # temp = np.empty(pi0_N_i[0].size, dtype=float) # temp[:] = rhs0_N_pr - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), p3_eq)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_pr) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), p3_eq)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_pr) # # ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, splines.evaluate_D(x_int, rho3)/J(x_int), rhs0_N_rho) - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), rho3)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_rho) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), rho3)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_rho) ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, B2_phi(x_int) / J(x_int), rhs0_N_phi) ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, B2_z(x_int) / J(x_int), rhs0_N_z) @@ -421,7 +415,7 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, pi1_D_i[0], pi1_D_i[1], subs, - xp.append(0, xp.cumsum(subs - 1)[:-1]), + np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (B2_phi(pts.flatten()) / J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), @@ -432,20 +426,20 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, pi1_D_i[0], pi1_D_i[1], subs, - xp.append(0, xp.cumsum(subs - 1)[:-1]), + np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (B2_z(pts.flatten()) / J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_z, ) - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, xp.ones(pts.shape, dtype=float), rhs1_D_z) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, np.ones(pts.shape, dtype=float), rhs1_D_z) ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, P3(x_int) / J(x_int), rhs0_N_pr) ker.rhs1_1d( pi1_D_i[0], pi1_D_i[1], subs, - xp.append(0, xp.cumsum(subs - 1)[:-1]), + np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (P3(pts.flatten()) / J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), @@ -457,7 +451,7 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, pi1_D_i[0], pi1_D_i[1], subs, - xp.append(0, xp.cumsum(subs - 1)[:-1]), + np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (Rho3(pts.flatten()) / J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), @@ -465,14 +459,12 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, ) rhs0_N_phi = spa.csr_matrix( - (rhs0_N_phi, (pi0_N_i[0], pi0_N_i[1])), - shape=(splines.NbaseN, splines.NbaseN), + (rhs0_N_phi, (pi0_N_i[0], pi0_N_i[1])), shape=(splines.NbaseN, splines.NbaseN) ).toarray() rhs0_N_z = spa.csr_matrix((rhs0_N_z, (pi0_N_i[0], pi0_N_i[1])), shape=(splines.NbaseN, splines.NbaseN)).toarray() rhs1_D_phi = spa.csr_matrix( - (rhs1_D_phi, (pi1_D_i[0], pi1_D_i[1])), - shape=(splines.NbaseD, splines.NbaseD), + (rhs1_D_phi, (pi1_D_i[0], pi1_D_i[1])), shape=(splines.NbaseD, splines.NbaseD) ).toarray() rhs1_D_z = spa.csr_matrix((rhs1_D_z, (pi1_D_i[0], pi1_D_i[1])), shape=(splines.NbaseD, splines.NbaseD)).toarray() @@ 
-482,144 +474,142 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, rhs1_D_pr = spa.csr_matrix((rhs1_D_pr, (pi1_D_i[0], pi1_D_i[1])), shape=(splines.NbaseD, splines.NbaseD)).toarray() rhs0_N_rho = spa.csr_matrix( - (rhs0_N_rho, (pi0_N_i[0], pi0_N_i[1])), - shape=(splines.NbaseN, splines.NbaseN), + (rhs0_N_rho, (pi0_N_i[0], pi0_N_i[1])), shape=(splines.NbaseN, splines.NbaseN) ).toarray() rhs1_D_rho = spa.csr_matrix( - (rhs1_D_rho, (pi1_D_i[0], pi1_D_i[1])), - shape=(splines.NbaseD, splines.NbaseD), + (rhs1_D_rho, (pi1_D_i[0], pi1_D_i[1])), shape=(splines.NbaseD, splines.NbaseD) ).toarray() - pi0_N_phi = xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_phi[1:-1, 1:-1]) - pi0_N_z = xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_z[1:-1, 1:-1]) + pi0_N_phi = np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_phi[1:-1, 1:-1]) + pi0_N_z = np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_z[1:-1, 1:-1]) - pi1_D_phi = xp.linalg.inv(proj.D.toarray()).dot(rhs1_D_phi) - pi1_D_z = xp.linalg.inv(proj.D.toarray()).dot(rhs1_D_z) + pi1_D_phi = np.linalg.inv(proj.D.toarray()).dot(rhs1_D_phi) + pi1_D_z = np.linalg.inv(proj.D.toarray()).dot(rhs1_D_z) - pi0_N_pr = xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_pr[1:-1, 1:-1]) - pi1_D_pr = xp.linalg.inv(proj.D.toarray()).dot(rhs1_D_pr) + pi0_N_pr = np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_pr[1:-1, 1:-1]) + pi1_D_pr = np.linalg.inv(proj.D.toarray()).dot(rhs1_D_pr) - pi0_N_rho = xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_rho[1:-1, 1:-1]) - pi1_D_rho = xp.linalg.inv(proj.D.toarray()).dot(rhs1_D_rho) + pi0_N_rho = np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_rho[1:-1, 1:-1]) + pi1_D_rho = np.linalg.inv(proj.D.toarray()).dot(rhs1_D_rho) # ======= matrices in strong induction equation ================ # 11 block - I_11 = -2 * xp.pi * m * pi0_N_phi - 2 * xp.pi * n * pi0_N_z + I_11 = -2 * np.pi * m * pi0_N_phi - 2 * np.pi * n * pi0_N_z # 21 block and 31 block I_21 = -GRAD.dot(pi0_N_phi) I_31 = -GRAD.dot(pi0_N_z) # 22 block and 32 block - I_22 = 2 * xp.pi * n * pi1_D_z - I_32 = -2 * xp.pi * m * pi1_D_z + I_22 = 2 * np.pi * n * pi1_D_z + I_32 = -2 * np.pi * m * pi1_D_z # 23 block and 33 block - I_23 = -2 * xp.pi * n * pi1_D_phi - I_33 = 2 * xp.pi * m * pi1_D_phi + I_23 = -2 * np.pi * n * pi1_D_phi + I_33 = 2 * np.pi * m * pi1_D_phi # total - I_all = xp.block( + I_all = np.block( [ - [I_11, xp.zeros((len(u2_r) - 2, len(u2_phi))), xp.zeros((len(u2_r) - 2, len(u2_z) - 1))], + [I_11, np.zeros((len(u2_r) - 2, len(u2_phi))), np.zeros((len(u2_r) - 2, len(u2_z) - 1))], [I_21, I_22, I_23[:, 1:]], [I_31[1:, :], I_32[1:, :], I_33[1:, 1:]], - ], + ] ) # ======= matrices in strong pressure equation ================ P_1 = -GRAD.dot(pi0_N_pr) - (gamma - 1) * pi1_D_pr.dot(GRAD) - P_2 = -2 * xp.pi * m * gamma * pi1_D_pr - P_3 = -2 * xp.pi * n * gamma * pi1_D_pr + P_2 = -2 * np.pi * m * gamma * pi1_D_pr + P_3 = -2 * np.pi * n * gamma * pi1_D_pr - P_all = xp.block([[P_1[1:, :], P_2[1:, :], P_3[1:, 1:]]]) + P_all = np.block([[P_1[1:, :], P_2[1:, :], P_3[1:, 1:]]]) # ========== matrices in weak momentum balance equation ====== A_1 = 1 / 2 * (pi0_N_rho.T.dot(M2_r) + M2_r.dot(pi0_N_rho)) A_2 = 1 / 2 * (pi1_D_rho.T.dot(M2_phi) + M2_phi.dot(pi1_D_rho)) A_3 = 1 / 2 * (pi1_D_rho.T.dot(M2_z) + M2_z.dot(pi1_D_rho))[:, :] - A_all = xp.block( + A_all = np.block( [ - [A_1, xp.zeros((A_1.shape[0], A_2.shape[1])), xp.zeros((A_1.shape[0], A_3.shape[1]))], - [xp.zeros((A_2.shape[0], A_1.shape[1])), A_2, 
xp.zeros((A_2.shape[0], A_3.shape[1]))], - [xp.zeros((A_3.shape[0], A_1.shape[1])), xp.zeros((A_3.shape[0], A_2.shape[1])), A_3], - ], + [A_1, np.zeros((A_1.shape[0], A_2.shape[1])), np.zeros((A_1.shape[0], A_3.shape[1]))], + [np.zeros((A_2.shape[0], A_1.shape[1])), A_2, np.zeros((A_2.shape[0], A_3.shape[1]))], + [np.zeros((A_3.shape[0], A_1.shape[1])), np.zeros((A_3.shape[0], A_2.shape[1])), A_3], + ] ) - MB_11 = 2 * xp.pi * n * pi0_N_z.T.dot(M2_r) + 2 * xp.pi * m * pi0_N_phi.T.dot(M2_r) + MB_11 = 2 * np.pi * n * pi0_N_z.T.dot(M2_r) + 2 * np.pi * m * pi0_N_phi.T.dot(M2_r) MB_12 = pi0_N_phi.T.dot(GRAD.T.dot(M2_phi)) - MB_12_eq[1:-1, :] MB_13 = pi0_N_z.T.dot(GRAD.T.dot(M2_z)) - MB_13_eq[1:-1, :] MB_14 = GRAD.T.dot(M3) MB_21 = MB_21_eq[:, 1:-1] - MB_22 = -2 * xp.pi * n * pi1_D_z.T.dot(M2_phi) - MB_23 = 2 * xp.pi * m * pi1_D_z.T.dot(M2_z) - MB_24 = 2 * xp.pi * m * M3 + MB_22 = -2 * np.pi * n * pi1_D_z.T.dot(M2_phi) + MB_23 = 2 * np.pi * m * pi1_D_z.T.dot(M2_z) + MB_24 = 2 * np.pi * m * M3 MB_31 = MB_31_eq[:, 1:-1] - MB_32 = 2 * xp.pi * n * pi1_D_phi.T.dot(M2_phi) - MB_33 = -2 * xp.pi * m * pi1_D_phi.T.dot(M2_z) - MB_34 = 2 * xp.pi * n * M3 + MB_32 = 2 * np.pi * n * pi1_D_phi.T.dot(M2_phi) + MB_33 = -2 * np.pi * m * pi1_D_phi.T.dot(M2_z) + MB_34 = 2 * np.pi * n * M3 - MB_b_all = xp.block( - [[MB_11, MB_12, MB_13[:, 1:]], [MB_21, MB_22, MB_23[:, 1:]], [MB_31[1:, :], MB_32[1:, :], MB_33[1:, 1:]]], + MB_b_all = np.block( + [[MB_11, MB_12, MB_13[:, 1:]], [MB_21, MB_22, MB_23[:, 1:]], [MB_31[1:, :], MB_32[1:, :], MB_33[1:, 1:]]] ) - MB_p_all = xp.block([[MB_14[:, 1:]], [MB_24[:, 1:]], [MB_34[1:, 1:]]]) + MB_p_all = np.block([[MB_14[:, 1:]], [MB_24[:, 1:]], [MB_34[1:, 1:]]]) ## ======= matrices in strong induction equation ================ ## 11 block - # I_11 = xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(-2*xp.pi*m*rhs0_N_phi[1:-1, 1:-1] - 2*xp.pi*n*rhs0_N_z[1:-1, 1:-1]) + # I_11 = np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(-2*np.pi*m*rhs0_N_phi[1:-1, 1:-1] - 2*np.pi*n*rhs0_N_z[1:-1, 1:-1]) # ## 21 block and 31 block - # I_21 = -GRAD[: , 1:-1].dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_phi[1:-1, 1:-1])) - # I_31 = -GRAD[1:, 1:-1].dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_z[1:-1, 1:-1])) + # I_21 = -GRAD[: , 1:-1].dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_phi[1:-1, 1:-1])) + # I_31 = -GRAD[1:, 1:-1].dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_z[1:-1, 1:-1])) # ## 22 block and 32 block - # I_22 = 2*xp.pi*n*xp.linalg.inv(proj.D.toarray()[ :, :]).dot(rhs1_D_z[ :, :]) - # I_32 = -2*xp.pi*m*xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_z[1:, :]) + # I_22 = 2*np.pi*n*np.linalg.inv(proj.D.toarray()[ :, :]).dot(rhs1_D_z[ :, :]) + # I_32 = -2*np.pi*m*np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_z[1:, :]) # ## 23 block and 33 block - # I_23 = -2*xp.pi*n*xp.linalg.inv(proj.D.toarray()[ :, :]).dot(rhs1_D_phi[ :, 1:]) - # I_33 = 2*xp.pi*m*xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_phi[1:, 1:]) + # I_23 = -2*np.pi*n*np.linalg.inv(proj.D.toarray()[ :, :]).dot(rhs1_D_phi[ :, 1:]) + # I_33 = 2*np.pi*m*np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_phi[1:, 1:]) # # ## ======= matrices in strong pressure equation ================ - # P_1 = -GRAD[1:, 1:-1].dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_pr[1:-1, 1:-1])) - (gamma - 1)*xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, :].dot(GRAD[:, 1:-1])) - # P_2 = -2*xp.pi*m*gamma*xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, :]) - # P_3 = 
-2*xp.pi*n*gamma*xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, 1:]) + # P_1 = -GRAD[1:, 1:-1].dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_pr[1:-1, 1:-1])) - (gamma - 1)*np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, :].dot(GRAD[:, 1:-1])) + # P_2 = -2*np.pi*m*gamma*np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, :]) + # P_3 = -2*np.pi*n*gamma*np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_pr[1:, 1:]) # # ## ========== matrices in weak momentum balance equation ====== - # rhs0_N_rho = xp.empty(pi0_N_i[0].size, dtype=float) + # rhs0_N_rho = np.empty(pi0_N_i[0].size, dtype=float) # ker.rhs0_1d(pi0_N_i[0], pi0_N_i[1], basis_int_N, splines.evaluate_D(x_int, rho3)/J(x_int), rhs0_N_rho) # # - # rhs1_D_rho = xp.empty(pi1_D_i[0].size, dtype=float) - # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, xp.append(0, xp.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), rho3)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_rho) + # rhs1_D_rho = np.empty(pi1_D_i[0].size, dtype=float) + # ker.rhs1_1d(pi1_D_i[0], pi1_D_i[1], subs, np.append(0, np.cumsum(subs - 1)[:-1]), wts, basis_his_D, (splines.evaluate_D(pts.flatten(), rho3)/J(pts.flatten())).reshape(pts.shape[0], pts.shape[1]), rhs1_D_rho) # # # - # A_1 = 1/2*(rhs0_N_rho[1:-1, 1:-1].T.dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) + M2_r[1:-1, 1:-1].dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_rho[1:-1, 1:-1]))) - # A_2 = 1/2*(rhs1_D_rho.T.dot(xp.linalg.inv(proj.D.toarray()[:, :]).T.dot(M2_phi)) + M2_phi.dot(xp.linalg.inv(proj.D.toarray()[:, :]).dot(rhs1_D_rho))) - # A_3 = 1/2*(rhs1_D_rho[1:, 1:].T.dot(xp.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) + M2_z[1:, 1:].dot(xp.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_rho[1:, 1:]))) + # A_1 = 1/2*(rhs0_N_rho[1:-1, 1:-1].T.dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) + M2_r[1:-1, 1:-1].dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).dot(rhs0_N_rho[1:-1, 1:-1]))) + # A_2 = 1/2*(rhs1_D_rho.T.dot(np.linalg.inv(proj.D.toarray()[:, :]).T.dot(M2_phi)) + M2_phi.dot(np.linalg.inv(proj.D.toarray()[:, :]).dot(rhs1_D_rho))) + # A_3 = 1/2*(rhs1_D_rho[1:, 1:].T.dot(np.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) + M2_z[1:, 1:].dot(np.linalg.inv(proj.D.toarray()[1:, 1:]).dot(rhs1_D_rho[1:, 1:]))) # # - # MB_11 = 2*xp.pi*n*rhs0_N_z[1:-1, 1:-1].T.dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) + 2*xp.pi*m*rhs0_N_phi[1:-1, 1:-1].T.dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) + # MB_11 = 2*np.pi*n*rhs0_N_z[1:-1, 1:-1].T.dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) + 2*np.pi*m*rhs0_N_phi[1:-1, 1:-1].T.dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(M2_r[1:-1, 1:-1])) # - # MB_12 = rhs0_N_phi[1:-1, 1:-1].T.dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(GRAD[:, 1:-1].T.dot(M2_phi))) - # MB_13 = rhs0_N_z[1:-1, 1:-1].T.dot(xp.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(GRAD[1:, 1:-1].T.dot(M2_z[1:, 1:]))) + # MB_12 = rhs0_N_phi[1:-1, 1:-1].T.dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(GRAD[:, 1:-1].T.dot(M2_phi))) + # MB_13 = rhs0_N_z[1:-1, 1:-1].T.dot(np.linalg.inv(proj.N.toarray()[1:-1, 1:-1]).T.dot(GRAD[1:, 1:-1].T.dot(M2_z[1:, 1:]))) # # MB_14 = GRAD[1:, 1:-1].T.dot(M3[1:, 1:]) # # - # MB_22 = -2*xp.pi*n*rhs1_D_z.T.dot(xp.linalg.inv(proj.D.toarray()).T.dot(M2_phi)) - # MB_23 = 2*xp.pi*m*rhs1_D_z[1:, 
:].T.dot(xp.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) - # MB_24 = 2*xp.pi*m*M3[ :, 1:] + # MB_22 = -2*np.pi*n*rhs1_D_z.T.dot(np.linalg.inv(proj.D.toarray()).T.dot(M2_phi)) + # MB_23 = 2*np.pi*m*rhs1_D_z[1:, :].T.dot(np.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) + # MB_24 = 2*np.pi*m*M3[ :, 1:] # - # MB_32 = 2*xp.pi*n*rhs1_D_phi[:, 1:].T.dot(xp.linalg.inv(proj.D.toarray()).T.dot(M2_phi)) - # MB_33 = -2*xp.pi*m*rhs1_D_phi[1:, 1:].T.dot(xp.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) - # MB_34 = 2*xp.pi*n*M3[1:, 1:] + # MB_32 = 2*np.pi*n*rhs1_D_phi[:, 1:].T.dot(np.linalg.inv(proj.D.toarray()).T.dot(M2_phi)) + # MB_33 = -2*np.pi*m*rhs1_D_phi[1:, 1:].T.dot(np.linalg.inv(proj.D.toarray()[1:, 1:]).T.dot(M2_z[1:, 1:])) + # MB_34 = 2*np.pi*n*M3[1:, 1:] # # # ==== matrices in eigenvalue problem ======== @@ -635,17 +625,17 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, W_32 = MB_32.dot(I_22) + MB_33.dot(I_32) + MB_34.dot(P_2) W_33 = MB_32.dot(I_23) + MB_33.dot(I_33) + MB_34.dot(P_3) - # W = xp.block([[W_11, W_12, W_13[:, 1:]], [W_21, W_22, W_23[:, 1:]], [W_31[1:, :], W_32[1:, :], W_33[1:, 1:]]]) - W = xp.block([[W_11, W_12, W_13[:, :]], [W_21, W_22, W_23[:, :]], [W_31[:, :], W_32[:, :], W_33[:, :]]]) + # W = np.block([[W_11, W_12, W_13[:, 1:]], [W_21, W_22, W_23[:, 1:]], [W_31[1:, :], W_32[1:, :], W_33[1:, 1:]]]) + W = np.block([[W_11, W_12, W_13[:, :]], [W_21, W_22, W_23[:, :]], [W_31[:, :], W_32[:, :], W_33[:, :]]]) - # print(xp.allclose(K, K.T)) - # print(xp.allclose(W, W.T)) + # print(np.allclose(K, K.T)) + # print(np.allclose(W, W.T)) # solve eigenvalue problem omega**2*K*xi = W*xi - MAT = xp.linalg.inv(-A_all).dot(W) + MAT = np.linalg.inv(-A_all).dot(W) - omega2, XYZ_eig = xp.linalg.eig(MAT) - # omega2, XYZ_eig = xp.linalg.eig(xp.linalg.inv(-A_all).dot(MB_b_all.dot(I_all) + MB_p_all.dot(P_all))) + omega2, XYZ_eig = np.linalg.eig(MAT) + # omega2, XYZ_eig = np.linalg.eig(np.linalg.inv(-A_all).dot(MB_b_all.dot(I_all) + MB_p_all.dot(P_all))) # extract components X_eig = XYZ_eig[: (splines.NbaseN - 2), :] @@ -653,71 +643,71 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, Z_eig = XYZ_eig[(splines.NbaseN - 2 + splines.NbaseD) :, :] # add boundary conditions X(0) = X(1) = 0 - X_eig = xp.vstack((xp.zeros(X_eig.shape[1], dtype=float), X_eig, xp.zeros(X_eig.shape[1], dtype=float))) + X_eig = np.vstack((np.zeros(X_eig.shape[1], dtype=float), X_eig, np.zeros(X_eig.shape[1], dtype=float))) # add boundary condition Z(0) = 0 - Z_eig = xp.vstack((xp.zeros(Z_eig.shape[1], dtype=float), Z_eig)) + Z_eig = np.vstack((np.zeros(Z_eig.shape[1], dtype=float), Z_eig)) return omega2, X_eig, Y_eig, Z_eig ## ========== matrices in initial value problem === - LHS = xp.block( + LHS = np.block( [ - [A_all, xp.zeros((A_all.shape[0], A_all.shape[1])), xp.zeros((A_all.shape[0], len(p3) - 1))], + [A_all, np.zeros((A_all.shape[0], A_all.shape[1])), np.zeros((A_all.shape[0], len(p3) - 1))], [ - xp.zeros((A_all.shape[0], A_all.shape[1])), - xp.identity(A_all.shape[0]), - xp.zeros((A_all.shape[0], len(p3) - 1)), + np.zeros((A_all.shape[0], A_all.shape[1])), + np.identity(A_all.shape[0]), + np.zeros((A_all.shape[0], len(p3) - 1)), ], [ - xp.zeros((len(p3) - 1, A_all.shape[1])), - xp.zeros((len(p3) - 1, A_all.shape[1])), - xp.identity(len(p3) - 1), + np.zeros((len(p3) - 1, A_all.shape[1])), + np.zeros((len(p3) - 1, A_all.shape[1])), + np.identity(len(p3) - 1), ], - ], + ] ) - RHS = xp.block( + RHS = np.block( [ - 
[xp.zeros((MB_b_all.shape[0], I_all.shape[1])), MB_b_all, MB_p_all], - [I_all, xp.zeros((I_all.shape[0], MB_b_all.shape[1])), xp.zeros((I_all.shape[0], MB_p_all.shape[1]))], - [P_all, xp.zeros((P_all.shape[0], MB_b_all.shape[1])), xp.zeros((P_all.shape[0], MB_p_all.shape[1]))], - ], + [np.zeros((MB_b_all.shape[0], I_all.shape[1])), MB_b_all, MB_p_all], + [I_all, np.zeros((I_all.shape[0], MB_b_all.shape[1])), np.zeros((I_all.shape[0], MB_p_all.shape[1]))], + [P_all, np.zeros((P_all.shape[0], MB_b_all.shape[1])), np.zeros((P_all.shape[0], MB_p_all.shape[1]))], + ] ) dt = 0.05 T = 200.0 Nt = int(T / dt) - UPDATE = xp.linalg.inv(LHS - dt / 2 * RHS).dot(LHS + dt / 2 * RHS) - ##UPDATE = xp.linalg.inv(LHS).dot(LHS + dt*RHS) + UPDATE = np.linalg.inv(LHS - dt / 2 * RHS).dot(LHS + dt / 2 * RHS) + ##UPDATE = np.linalg.inv(LHS).dot(LHS + dt*RHS)
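The UPDATE matrix above is a Crank-Nicolson step for the linear system LHS * du/dt = RHS * u. A minimal sketch of the same update rule on a stand-in 2x2 skew-symmetric system (not the MHD matrices), showing that the Cayley-type update preserves the norm:

import numpy as np

LHS = np.eye(2)
RHS = np.array([[0.0, 1.0], [-1.0, 0.0]])  # harmonic oscillator u'' = -u as a first-order system
dt = 0.05
UPDATE = np.linalg.inv(LHS - dt / 2 * RHS).dot(LHS + dt / 2 * RHS)  # Crank-Nicolson step

u = np.array([1.0, 0.0])
for _ in range(int(200.0 / dt)):  # same dt and final time as above
    u = UPDATE.dot(u)
assert abs(u @ u - 1.0) < 1e-9  # norm (energy) is conserved for skew-symmetric RHS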
# - # lambdas, eig_vecs = xp.linalg.eig(UPDATE) + # lambdas, eig_vecs = np.linalg.eig(UPDATE) # return lambdas # # return lambdas # - u2_r_all = xp.zeros((Nt + 1, len(u2_r)), dtype=float) - u2_phi_all = xp.zeros((Nt + 1, len(u2_phi)), dtype=float) - u2_z_all = xp.zeros((Nt + 1, len(u2_z)), dtype=float) + u2_r_all = np.zeros((Nt + 1, len(u2_r)), dtype=float) + u2_phi_all = np.zeros((Nt + 1, len(u2_phi)), dtype=float) + u2_z_all = np.zeros((Nt + 1, len(u2_z)), dtype=float) - b2_r_all = xp.zeros((Nt + 1, len(b2_r)), dtype=float) - b2_phi_all = xp.zeros((Nt + 1, len(b2_phi)), dtype=float) - b2_z_all = xp.zeros((Nt + 1, len(b2_z)), dtype=float) + b2_r_all = np.zeros((Nt + 1, len(b2_r)), dtype=float) + b2_phi_all = np.zeros((Nt + 1, len(b2_phi)), dtype=float) + b2_z_all = np.zeros((Nt + 1, len(b2_z)), dtype=float) - p3_all = xp.zeros((Nt + 1, len(p3)), dtype=float) + p3_all = np.zeros((Nt + 1, len(p3)), dtype=float) # initialization # u2_r_all[0, :] = u2_r # u2_phi_all[0, :] = u2_phi - u2_r_all[0, 1:-1] = xp.random.rand(len(u2_r) - 2) - p3_all[0, 1:] = xp.random.rand(len(p3) - 1) + u2_r_all[0, 1:-1] = np.random.rand(len(u2_r) - 2) + p3_all[0, 1:] = np.random.rand(len(p3) - 1) # time integration for n in range(Nt): - old = xp.concatenate( + old = np.concatenate( ( u2_r_all[n, 1:-1], u2_phi_all[n, :], @@ -726,24 +716,23 @@ def solve_ev_problem_FEEC(Rho, B_phi, dB_phi, B_z, dB_z, P, gamma, a, R0, n, m, b2_phi_all[n, :], b2_z_all[n, 1:], p3_all[n, 1:], - ), + ) ) new = UPDATE.dot(old) # extract components - unew, bnew, pnew = xp.split( - new, - [len(u2_r) - 2 + len(u2_phi) + len(u2_z) - 1, 2 * (len(u2_r) - 2 + len(u2_phi) + len(u2_z) - 1)], + unew, bnew, pnew = np.split( + new, [len(u2_r) - 2 + len(u2_phi) + len(u2_z) - 1, 2 * (len(u2_r) - 2 + len(u2_phi) + len(u2_z) - 1)] ) - u2_r_all[n + 1, :] = xp.array([0.0] + list(unew[: (splines.NbaseN - 2)]) + [0.0]) + u2_r_all[n + 1, :] = np.array([0.0] + list(unew[: (splines.NbaseN - 2)]) + [0.0]) u2_phi_all[n + 1, :] = unew[(splines.NbaseN - 2) : (splines.NbaseN - 2 + splines.NbaseD)] - u2_z_all[n + 1, :] = xp.array([0.0] + list(unew[(splines.NbaseN - 2 + splines.NbaseD) :])) + u2_z_all[n + 1, :] = np.array([0.0] + list(unew[(splines.NbaseN - 2 + splines.NbaseD) :])) - b2_r_all[n + 1, :] = xp.array([0.0] + list(bnew[: (splines.NbaseN - 2)]) + [0.0]) + b2_r_all[n + 1, :] = np.array([0.0] + list(bnew[: (splines.NbaseN - 2)]) + [0.0]) b2_phi_all[n + 1, :] = bnew[(splines.NbaseN - 2) : (splines.NbaseN - 2 + splines.NbaseD)] - b2_z_all[n + 1, :] = xp.array([0.0] + list(bnew[(splines.NbaseN - 2 + splines.NbaseD) :])) + b2_z_all[n + 1, :] = np.array([0.0] + list(bnew[(splines.NbaseN - 2 + splines.NbaseD) :])) - p3_all[n + 1, :] = xp.array([0.0] + list(pnew)) + p3_all[n + 1, :] = np.array([0.0] + list(pnew)) return u2_r_all, u2_phi_all, u2_z_all, b2_r_all, b2_phi_all, b2_z_all, p3_all, omega2 diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/control_variate.py index a4b95eb06..37940a8d6 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/control_variate.py @@ -6,11 +6,11 @@ Class for control variates in delta-f method for current coupling scheme. """ -import cunumpy as xp import scipy.sparse as spa import struphy.feec.basics.kernels_3d as ker import struphy.feec.control_variates.kernels_control_variate as ker_cv +from struphy.utils.arrays import xp as np class terms_control_variate: @@ -40,7 +40,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): kind_fun_eq = [11, 12, 13, 14] # ========= evaluation of DF^(-1) * jh_eq_phys * |det(DF)| at quadrature points ========= - self.mat_jh1 = xp.empty( + self.mat_jh1 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -51,7 +51,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.mat_jh2 = xp.empty( + self.mat_jh2 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -62,7 +62,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.mat_jh3 = xp.empty( + self.mat_jh3 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -133,7 +133,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ) # ========= evaluation of nh_eq_phys * |det(DF)| at quadrature points =================== - self.mat_nh = xp.empty( + self.mat_nh = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -166,7 +166,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ) # =========== 2-form magnetic field at quadrature points ================================= - self.B2_1 = xp.empty( + self.B2_1 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -177,7 +177,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.B2_2 = xp.empty( + self.B2_2 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -188,7 +188,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.B2_3 = xp.empty( + self.B2_3 = np.empty( ( self.space.Nel[0], self.space.n_quad[0], @@ -202,7 +202,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): # ================== correction matrices in step 1 ======================== if self.basis_u == 0: - self.M12 = xp.empty( + self.M12 = np.empty( ( self.space.NbaseN[0], self.space.NbaseN[1], @@ -213,7 +213,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.M13 = xp.empty( + self.M13 = np.empty( ( self.space.NbaseN[0], self.space.NbaseN[1], @@ -224,7 +224,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.M23 = xp.empty( + self.M23 = np.empty( ( self.space.NbaseN[0], self.space.NbaseN[1], @@ -237,7 +237,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ) elif self.basis_u == 2: - self.M12 = xp.empty( + self.M12 = np.empty( ( self.space.NbaseN[0], self.space.NbaseD[1], @@ -248,7 +248,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.M13 = xp.empty( + self.M13 = np.empty( ( self.space.NbaseN[0], self.space.NbaseD[1], @@ -259,7 +259,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u): ), dtype=float, ) - self.M23 = xp.empty( + self.M23 =
np.empty( ( self.space.NbaseD[0], self.space.NbaseN[1], @@ -273,14 +273,14 @@ def __init__(self, tensor_space_FEM, domain, basis_u): # ==================== correction vectors in step 3 ======================= if self.basis_u == 0: - self.F1 = xp.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) - self.F2 = xp.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) - self.F3 = xp.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) + self.F1 = np.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) + self.F2 = np.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) + self.F3 = np.empty((self.space.NbaseN[0], self.space.NbaseN[1], self.space.NbaseN[2]), dtype=float) elif self.basis_u == 2: - self.F1 = xp.empty((self.space.NbaseN[0], self.space.NbaseD[1], self.space.NbaseD[2]), dtype=float) - self.F2 = xp.empty((self.space.NbaseD[0], self.space.NbaseN[1], self.space.NbaseD[2]), dtype=float) - self.F3 = xp.empty((self.space.NbaseD[0], self.space.NbaseD[1], self.space.NbaseN[2]), dtype=float) + self.F1 = np.empty((self.space.NbaseN[0], self.space.NbaseD[1], self.space.NbaseD[2]), dtype=float) + self.F2 = np.empty((self.space.NbaseD[0], self.space.NbaseN[1], self.space.NbaseD[2]), dtype=float) + self.F3 = np.empty((self.space.NbaseD[0], self.space.NbaseD[1], self.space.NbaseN[2]), dtype=float) # ===== inner product in V0^3 resp. V2 of (B x jh_eq) - term ========== def inner_prod_jh_eq(self, b1, b2, b3): @@ -511,7 +511,7 @@ def inner_prod_jh_eq(self, b1, b2, b3): self.B2_1 * self.mat_jh2 - self.B2_2 * self.mat_jh1, ) - return xp.concatenate((self.F1.flatten(), self.F2.flatten(), self.F3.flatten())) + return np.concatenate((self.F1.flatten(), self.F2.flatten(), self.F3.flatten())) # ===== mass matrix in V0^3 resp. 
V2 of -(rhoh_eq * (B x U)) - term ======= def mass_nh_eq(self, b1, b2, b3): diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_control_variate.py index 39156f985..e39a463ab 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_control_variate.py @@ -1,9 +1,9 @@ -import cunumpy as xp import scipy.sparse as spa import struphy.feec.basics.kernels_3d as ker import struphy.feec.control_variates.kinetic_extended.fB_massless_kernels_control_variate as ker_cv import struphy.feec.control_variates.kinetic_extended.fnB_massless_cv_kernel_2 as ker_cv2 +from struphy.utils.arrays import xp as np def bv_right( @@ -204,7 +204,7 @@ def bv_right( ) # ========================= C.T =========================== return tensor_space_FEM.C.T.dot( - xp.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())), + np.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())) ) diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_kernels_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_kernels_control_variate.py index 7f15931ec..9220cdc69 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_kernels_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fB_massless_kernels_control_variate.py @@ -584,49 +584,13 @@ def vv( bd3[:] = b3[pd3, :pn3] * d3[:] vel[0] = eva.evaluation_kernel( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - NbaseD[0], - NbaseN[1], - NbaseN[2], - bb1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, NbaseD[0], NbaseN[1], NbaseN[2], bb1 ) vel[1] = eva.evaluation_kernel( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - NbaseN[0], - NbaseD[1], - NbaseN[2], - bb2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, NbaseN[0], NbaseD[1], NbaseN[2], bb2 ) vel[2] = eva.evaluation_kernel( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - NbaseN[0], - NbaseN[1], - NbaseD[2], - bb3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, NbaseN[0], NbaseN[1], NbaseD[2], bb3 ) # ======= here we use the linear hat function =========== ie1 = int(eta1 * Nel[0]) diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_control_variate.py index 5e9c04eb0..cb8877c6f 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_control_variate.py @@ -1,8 +1,9 @@ -import cunumpy as xp import hylife.utilitis_FEEC.basics.kernels_3d as ker import hylife.utilitis_FEEC.control_variates.fnB_massless_kernels_control_variate as ker_cv import scipy.sparse as spa +from struphy.utils.arrays import xp as np + def bv_pre(tol, n, LO_inv, tensor_space_FEM, p, Nel, idnx, idny, idnz): r""" @@ -248,7 +249,7 @@ def bv_right( ) # ========================= C.T =========================== return tensor_space_FEM.C.T.dot( 
- xp.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())), + np.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())) ) @@ -429,7 +430,7 @@ def uv_right( ) # ========================= C.T =========================== temp_final = temp_final_0.flatten() + tensor_space_FEM.G.T.dot( - xp.concatenate((temp_final_1.flatten(), temp_final_2.flatten(), temp_final_3.flatten())), + np.concatenate((temp_final_1.flatten(), temp_final_2.flatten(), temp_final_3.flatten())) ) return temp_final diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_kernels_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_kernels_control_variate.py index 965a7af33..f3b6fca0e 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_kernels_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/fnB_massless_kernels_control_variate.py @@ -212,65 +212,17 @@ def vv( bd3[:] = b3[pd3, :pn3] * d3[:] vel[0] = eva.evaluation_kernel( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - NbaseD[0], - NbaseN[1], - NbaseN[2], - bb1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, NbaseD[0], NbaseN[1], NbaseN[2], bb1 ) vel[1] = eva.evaluation_kernel( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - NbaseN[0], - NbaseD[1], - NbaseN[2], - bb2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, NbaseN[0], NbaseD[1], NbaseN[2], bb2 ) vel[2] = eva.evaluation_kernel( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - NbaseN[0], - NbaseN[1], - NbaseD[2], - bb3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, NbaseN[0], NbaseN[1], NbaseD[2], bb3 ) tt = eva.evaluation_kernel( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span1, - span2, - span3, - NbaseN[0], - NbaseN[1], - NbaseN[2], - n, + pn1, pn2, pn3, bn1, bn2, bn3, span1, span2, span3, NbaseN[0], NbaseN[1], NbaseN[2], n ) if abs(tt) > tol: U_value = 1.0 / tt diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_control_variate.py index 3459ff7b2..1ea567314 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_control_variate.py @@ -1,8 +1,9 @@ -import cunumpy as xp import hylife.utilitis_FEEC.basics.kernels_3d as ker import hylife.utilitis_FEEC.control_variates.massless_kernels_control_variate as ker_cv import scipy.sparse as spa +from struphy.utils.arrays import xp as np + def bv_pre(u, uvalue, tensor_space_FEM, p, Nel, idnx, idny, idnz): r""" @@ -247,7 +248,7 @@ def bv_right( ) # ========================= C.T =========================== return tensor_space_FEM.C.T.dot( - xp.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())), + np.concatenate((temp_twoform1.flatten(), temp_twoform2.flatten(), temp_twoform3.flatten())) ) @@ -430,7 +431,7 @@ def uv_right( ) # ========================= C.T =========================== temp_final = temp_final_0.flatten() + tensor_space_FEM.G.T.dot( - xp.concatenate((temp_final_1.flatten(), temp_final_2.flatten(), temp_final_3.flatten())), + np.concatenate((temp_final_1.flatten(), temp_final_2.flatten(), 
temp_final_3.flatten())) ) return temp_final diff --git a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_kernels_control_variate.py b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_kernels_control_variate.py index bfff64b2a..c56b67711 100644 --- a/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_kernels_control_variate.py +++ b/src/struphy/eigenvalue_solvers/legacy/control_variates/kinetic_extended/massless_kernels_control_variate.py @@ -27,7 +27,6 @@ def uvpre( bn3: "float[:,:,:,:]", ): from numpy import empty, exp, zeros - # -- removed omp: #$ omp parallel # -- removed omp: #$ omp do private (ie1, ie2, ie3, q1, q2, q3, il1, il2, il3, value) @@ -196,84 +195,34 @@ def uvright( for q2 in range(nq2): for q3 in range(nq3): dft[0, 0] = DFI_11[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[0, 0]) dft[0, 1] = DFI_21[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[0, 1]) dft[0, 2] = DFI_31[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[0, 2]) dft[1, 0] = DFI_12[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[1, 0]) dft[1, 1] = DFI_22[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[1, 1]) dft[1, 2] = DFI_32[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[1, 2]) dft[2, 0] = DFI_13[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[2, 0]) dft[2, 1] = DFI_23[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[2, 1]) dft[2, 2] = DFI_33[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.df_inv(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map, components[2, 2]) detdet = df_det[ - ie1, - ie2, - ie3, - q1, - q2, - q3, + ie1, ie2, ie3, q1, q2, q3 ] # mappings_analytical.det_df(pts1[ie1, q1], pts2[ie2,q2], pts3[ie3,q3], kind_map, params_map) Jeq[0] = Jeqx[ie1, ie2, ie3, q1, q2, q3] Jeq[1] = Jeqy[ie1, ie2, ie3, q1, q2, q3] @@ -756,67 +705,19 @@ def vv( bd3[:] = b3[pd3, :pn3] * d3[:] vel[0] = eva.evaluation_kernel( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - NbaseD[0], - NbaseN[1], - NbaseN[2], - bb1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, NbaseD[0], NbaseN[1], NbaseN[2], bb1 ) vel[1] = eva.evaluation_kernel( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - NbaseN[0], - NbaseD[1], - NbaseN[2], - bb2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, NbaseN[0], NbaseD[1], NbaseN[2], bb2 ) vel[2] = eva.evaluation_kernel( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - 
span3 - 1, - NbaseN[0], - NbaseN[1], - NbaseD[2], - bb3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, NbaseN[0], NbaseN[1], NbaseD[2], bb3 ) U_value = exp( -eva.evaluation_kernel( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span1, - span2, - span3, - NbaseN[0], - NbaseN[1], - NbaseN[2], - u, - ), + pn1, pn2, pn3, bn1, bn2, bn3, span1, span2, span3, NbaseN[0], NbaseN[1], NbaseN[2], u + ) ) # ========= mapping evaluation ============= diff --git a/src/struphy/eigenvalue_solvers/legacy/emw_operators.py b/src/struphy/eigenvalue_solvers/legacy/emw_operators.py index 9e8c95b3f..3187ac649 100755 --- a/src/struphy/eigenvalue_solvers/legacy/emw_operators.py +++ b/src/struphy/eigenvalue_solvers/legacy/emw_operators.py @@ -6,11 +6,11 @@ Class for 2D/3D linear MHD projection operators. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_3d as ker import struphy.eigenvalue_solvers.legacy.mass_matrices_3d_pre as mass_3d_pre +from struphy.utils.arrays import xp as np class EMW_operators: @@ -134,7 +134,7 @@ def __assemble_M1_cross(self, weight): Ni = self.SPACES.Nbase_1form[a] Nj = self.SPACES.Nbase_1form[b] - M[a][b] = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) + M[a][b] = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) # evaluate metric tensor at quadrature points if a == 1 and b == 2: @@ -185,9 +185,9 @@ def __assemble_M1_cross(self, weight): mat_w, ) # convert to sparse matrix - indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) + indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -198,14 +198,12 @@ def __assemble_M1_cross(self, weight): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M[a][b] = spa.csr_matrix( - (M[a][b].flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (M[a][b].flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M[a][b].eliminate_zeros() M = spa.bmat( - [[M[0][0], M[0][1], M[0][2]], [M[1][0], M[1][1], M[1][2]], [M[2][0], M[2][1], M[2][2]]], - format="csr", + [[M[0][0], M[0][1], M[0][2]], [M[1][0], M[1][1], M[1][2]], [M[2][0], M[2][1], M[2][2]]], format="csr" ) self.R1_mat = -self.SPACES.E1_0.dot(M.dot(self.SPACES.E1_0.T)).tocsr() diff --git a/src/struphy/eigenvalue_solvers/legacy/inner_products_1d.py b/src/struphy/eigenvalue_solvers/legacy/inner_products_1d.py index b4f019995..5cae935fb 100644 --- a/src/struphy/eigenvalue_solvers/legacy/inner_products_1d.py +++ b/src/struphy/eigenvalue_solvers/legacy/inner_products_1d.py @@ -6,9 +6,10 @@ Modules to compute inner products in 1d. 
""" -import cunumpy as xp import scipy.sparse as spa +from struphy.utils.arrays import xp as np + # ======= inner product in V0 ==================== def inner_prod_V0(spline_space, fun, mapping=None): @@ -39,7 +40,7 @@ def inner_prod_V0(spline_space, fun, mapping=None): # evaluation of mapping at quadrature points if mapping == None: - mat_map = xp.ones(pts.shape, dtype=float) + mat_map = np.ones(pts.shape, dtype=float) else: mat_map = mapping(pts.flatten()).reshape(pts.shape) @@ -47,7 +48,7 @@ def inner_prod_V0(spline_space, fun, mapping=None): mat_f = fun(pts.flatten()).reshape(pts.shape) # assembly - F = xp.zeros(NbaseN, dtype=float) + F = np.zeros(NbaseN, dtype=float) for ie in range(Nel): for il in range(p + 1): @@ -90,7 +91,7 @@ def inner_prod_V1(spline_space, fun, mapping=None): # evaluation of mapping at quadrature points if mapping == None: - mat_map = xp.ones(pts.shape, dtype=float) + mat_map = np.ones(pts.shape, dtype=float) else: mat_map = 1 / mapping(pts.flatten()).reshape(pts.shape) @@ -98,7 +99,7 @@ def inner_prod_V1(spline_space, fun, mapping=None): mat_f = fun(pts.flatten()).reshape(pts.shape) # assembly - F = xp.zeros(NbaseD, dtype=float) + F = np.zeros(NbaseD, dtype=float) for ie in range(Nel): for il in range(p): diff --git a/src/struphy/eigenvalue_solvers/legacy/inner_products_2d.py b/src/struphy/eigenvalue_solvers/legacy/inner_products_2d.py index 05df4725f..fd7ecabd4 100644 --- a/src/struphy/eigenvalue_solvers/legacy/inner_products_2d.py +++ b/src/struphy/eigenvalue_solvers/legacy/inner_products_2d.py @@ -6,10 +6,10 @@ Modules to compute inner products with given functions in 2D. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_2d as ker +from struphy.utils.arrays import xp as np # ================ inner product in V0 =========================== @@ -25,7 +25,7 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 0-form with which the inner products shall be computed (either callable or 2D array with values at quadrature points) """ @@ -46,10 +46,10 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): det_df = det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) # evaluation of given 0-form at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") mat_f[:, :] = fun(quad_mesh[0], quad_mesh[1], 0.0) else: mat_f[:, :] = fun @@ -57,7 +57,7 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): # assembly Ni = tensor_space_FEM.Nbase_0form - F = xp.zeros((Ni[0], Ni[1]), dtype=float) + F = np.zeros((Ni[0], Ni[1]), dtype=float) mat_f = mat_f.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) @@ -94,7 +94,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the 1-form components with which the inner products shall be computed (either list of 3 callables or 2D arrays with values at quadrature points) """ @@ -127,10 +127,10 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): g_inv = domain.metric_inv(pts[0].flatten(), pts[1].flatten(), 0.0) # 1-form components at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size), dtype=float) 
+ mat_f = np.empty((pts[0].size, pts[1].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") # components of global inner product F = [0, 0, 0] @@ -138,7 +138,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): # assembly for a in range(3): Ni = tensor_space_FEM.Nbase_1form[a] - F[a] = xp.zeros((Ni[0], Ni[1]), dtype=float) + F[a] = np.zeros((Ni[0], Ni[1]), dtype=float) mat_f[:, :] = 0.0 @@ -170,7 +170,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): mat_f * det_df, ) - F1 = tensor_space_FEM.E1_pol_0.dot(xp.concatenate((F[0].flatten(), F[1].flatten()))) + F1 = tensor_space_FEM.E1_pol_0.dot(np.concatenate((F[0].flatten(), F[1].flatten()))) F2 = tensor_space_FEM.E0_pol_0.dot(F[2].flatten()) return F1, F2 @@ -187,7 +187,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the 2-form components with which the inner products shall be computed (either list of 3 callables or 2D arrays with values at quadrature points) """ @@ -220,10 +220,10 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): g = domain.metric(pts[0].flatten(), pts[1].flatten(), 0.0) # 2-form components at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") # components of global inner product F = [0, 0, 0] @@ -231,7 +231,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): # assembly for a in range(3): Ni = tensor_space_FEM.Nbase_2form[a] - F[a] = xp.zeros((Ni[0], Ni[1]), dtype=float) + F[a] = np.zeros((Ni[0], Ni[1]), dtype=float) mat_f[:, :] = 0.0 @@ -263,7 +263,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): mat_f / det_df, ) - F1 = tensor_space_FEM.E2_pol_0.dot(xp.concatenate((F[0].flatten(), F[1].flatten()))) + F1 = tensor_space_FEM.E2_pol_0.dot(np.concatenate((F[0].flatten(), F[1].flatten()))) F2 = tensor_space_FEM.E3_pol_0.dot(F[2].flatten()) return F1, F2 @@ -280,7 +280,7 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 3-form component with which the inner products shall be computed (either callable or 2D array with values at quadrature points) """ @@ -301,10 +301,10 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): det_df = det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) # evaluation of given 3-form at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") mat_f[:, :] = fun(quad_mesh[0], quad_mesh[1], 0.0) else: mat_f[:, :] = fun @@ -312,7 +312,7 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): # assembly Ni = tensor_space_FEM.Nbase_3form - F = xp.zeros((Ni[0], Ni[1]), dtype=float) + F = np.zeros((Ni[0], Ni[1]), dtype=float) mat_f = mat_f.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) diff --git a/src/struphy/eigenvalue_solvers/legacy/inner_products_3d.py 
b/src/struphy/eigenvalue_solvers/legacy/inner_products_3d.py index 20d95c05c..5aa9f710a 100644 --- a/src/struphy/eigenvalue_solvers/legacy/inner_products_3d.py +++ b/src/struphy/eigenvalue_solvers/legacy/inner_products_3d.py @@ -6,10 +6,10 @@ Modules to compute inner products with given functions in 3D. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_3d as ker +from struphy.utils.arrays import xp as np # ================ inner product in V0 =========================== @@ -25,7 +25,7 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 0-form with which the inner products shall be computed (either callable or 3D array with values at quadrature points) """ @@ -46,10 +46,10 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): det_df = det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]) # evaluation of given 0-form at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun(quad_mesh[0], quad_mesh[1], quad_mesh[2]) else: mat_f[:, :, :] = fun @@ -57,7 +57,7 @@ def inner_prod_V0(tensor_space_FEM, domain, fun): # assembly Ni = tensor_space.Nbase_0form - F = xp.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) + F = np.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) mat_f = mat_f.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]) @@ -101,7 +101,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the 1-form components with which the inner products shall be computed (either list of 3 callables or 3D arrays with values at quadrature points) """ @@ -134,10 +134,10 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): g_inv = domain.metric_inv(pts[0].flatten(), pts[1].flatten(), pts[2].flatten()) # 1-form components at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") # components of global inner product F = [0, 0, 0] @@ -146,7 +146,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): for a in range(3): Ni = tensor_space_FEM.Nbase_1form[a] - F[a] = xp.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) + F[a] = np.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) mat_f[:, :, :] = 0.0 @@ -185,7 +185,7 @@ def inner_prod_V1(tensor_space_FEM, domain, fun): mat_f * det_df, ) - return tensor_space_FEM.E1_0.dot(xp.concatenate((F[0].flatten(), F[1].flatten(), F[2].flatten()))) + return tensor_space_FEM.E1_0.dot(np.concatenate((F[0].flatten(), F[1].flatten(), F[2].flatten()))) # ================ inner product in V2 =========================== @@ -199,7 +199,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the 2-form components 
with which the inner products shall be computed (either list of 3 callables or 3D arrays with values at quadrature points) """ @@ -232,10 +232,10 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): g = domain.metric(pts[0].flatten(), pts[1].flatten(), pts[2].flatten()) # 2-form components at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") # components of global inner product F = [0, 0, 0] @@ -244,7 +244,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): for a in range(3): Ni = tensor_space_FEM.Nbase_2form[a] - F[a] = xp.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) + F[a] = np.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) mat_f[:, :, :] = 0.0 @@ -283,7 +283,7 @@ def inner_prod_V2(tensor_space_FEM, domain, fun): mat_f / det_df, ) - return tensor_space_FEM.E2_0.dot(xp.concatenate((F[0].flatten(), F[1].flatten(), F[2].flatten()))) + return tensor_space_FEM.E2_0.dot(np.concatenate((F[0].flatten(), F[1].flatten(), F[2].flatten()))) # ================ inner product in V3 =========================== @@ -297,7 +297,7 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): domain : domain domain object defining the geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 3-form component with which the inner products shall be computed (either callable or 3D array with values at quadrature points) """ @@ -318,10 +318,10 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): det_df = det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]) # evaluation of given 3-form at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun(quad_mesh[0], quad_mesh[1], quad_mesh[2]) else: mat_f[:, :, :] = fun @@ -329,7 +329,7 @@ def inner_prod_V3(tensor_space_FEM, domain, fun): # assembly Ni = tensor_space_FEM.Nbase_3form - F = xp.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) + F = np.zeros((Ni[0], Ni[1], Ni[2]), dtype=float) ker.kernel_inner( Nel[0], diff --git a/src/struphy/eigenvalue_solvers/legacy/l2_error_1d.py b/src/struphy/eigenvalue_solvers/legacy/l2_error_1d.py index d568d3207..f8544c1cf 100644 --- a/src/struphy/eigenvalue_solvers/legacy/l2_error_1d.py +++ b/src/struphy/eigenvalue_solvers/legacy/l2_error_1d.py @@ -6,9 +6,10 @@ Modules to compute L2-errors in 1d.
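Each routine accumulates a Gauss-Legendre quadrature of the squared pointwise error on every element and returns the square root of the total. A minimal sketch of that reduction, with hypothetical arrays wts (quadrature weights) and fh/fex (discrete and exact values at the quadrature points), not taken from this module:

    import numpy as np

    def l2_error_sketch(wts, fh, fex):
        # accumulate w * (f_h - f_ex)**2 over all elements and quadrature
        # points, then take the square root (cf. l2_error_V0/l2_error_V1)
        return np.sqrt(np.sum(wts * (fh - fex) ** 2))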
""" -import cunumpy as xp import scipy.sparse as spa +from struphy.utils.arrays import xp as np + # ======= error in V0 ==================== def l2_error_V0(spline_space, mapping, coeff, fun): @@ -47,7 +48,7 @@ def l2_error_V0(spline_space, mapping, coeff, fun): mat_f = fun(pts) # assembly - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) for ie in range(Nel): for q in range(n_quad): @@ -58,7 +59,7 @@ def l2_error_V0(spline_space, mapping, coeff, fun): error[ie] += wts[ie, q] * (bi - mat_f[ie, q]) ** 2 - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) # ======= error in V1 ==================== @@ -98,7 +99,7 @@ def l2_error_V1(spline_space, mapping, coeff, fun): mat_f = fun(pts) # assembly - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) for ie in range(Nel): for q in range(n_quad): @@ -109,4 +110,4 @@ def l2_error_V1(spline_space, mapping, coeff, fun): error[ie] += wts[ie, q] * (bi - mat_f[ie, q]) ** 2 - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) diff --git a/src/struphy/eigenvalue_solvers/legacy/l2_error_2d.py b/src/struphy/eigenvalue_solvers/legacy/l2_error_2d.py index 452dd570b..818d0d7c2 100644 --- a/src/struphy/eigenvalue_solvers/legacy/l2_error_2d.py +++ b/src/struphy/eigenvalue_solvers/legacy/l2_error_2d.py @@ -6,10 +6,10 @@ Modules to compute L2-errors of discrete p-forms with analytical forms in 2D. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_2d as ker +from struphy.utils.arrays import xp as np # ======= error in V0 ==================== @@ -25,7 +25,7 @@ def l2_error_V0(tensor_space_FEM, domain, f0, c0, method="standard"): domain : domain domain object defining the geometry - f0 : callable or xp.ndarray + f0 : callable or np.ndarray the 0-form with which the error shall be computed c0 : array_like @@ -63,12 +63,12 @@ def l2_error_V0(tensor_space_FEM, domain, f0, c0, method="standard"): # evaluation of exact 0-form at quadrature points if callable(f0): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") f0 = f0(quad_mesh[0], quad_mesh[1], 0.0) if method == "standard": # evaluation of discrete 0-form at quadrature points - f0_h = tensor_space_FEM.evaluate_NN(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c0, "V0")[:, :, 0] + f0_h = tensor_space_FEM.evaluate_NN(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c0, "V0")[:, :, 0] # compute error error = 0.0 @@ -78,7 +78,7 @@ def l2_error_V0(tensor_space_FEM, domain, f0, c0, method="standard"): else: # compute error in each element - error = xp.zeros(Nel[:2], dtype=float) + error = np.zeros(Nel[:2], dtype=float) ker.kernel_l2error( Nel, @@ -106,7 +106,7 @@ def l2_error_V0(tensor_space_FEM, domain, f0, c0, method="standard"): error = error.sum() - return xp.sqrt(error) + return np.sqrt(error) # ======= error in V1 ==================== @@ -122,7 +122,7 @@ def l2_error_V1(tensor_space_FEM, domain, f1, c1, method="standard"): domain : domain domain object defining the geometry - f1 : list of callables or xp.ndarrays + f1 : list of callables or np.ndarrays the three 1-form components with which the error shall be computed c1 : list of array_like @@ -162,16 +162,16 @@ def l2_error_V1(tensor_space_FEM, domain, f1, c1, method="standard"): # evaluation of exact 1-form components at quadrature points if callable(f1[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = 
np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") f1_1 = f1[0](quad_mesh[0], quad_mesh[1], 0.0) f1_2 = f1[1](quad_mesh[0], quad_mesh[1], 0.0) f1_3 = f1[2](quad_mesh[0], quad_mesh[1], 0.0) if method == "standard": # evaluation of discrete 1-form components at quadrature points - f1_h_1 = tensor_space_FEM.evaluate_DN(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c1_1, "V1")[:, :, 0] - f1_h_2 = tensor_space_FEM.evaluate_ND(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c1_2, "V1")[:, :, 0] - f1_h_3 = tensor_space_FEM.evaluate_NN(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c1_3, "V1")[:, :, 0] + f1_h_1 = tensor_space_FEM.evaluate_DN(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c1_1, "V1")[:, :, 0] + f1_h_2 = tensor_space_FEM.evaluate_ND(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c1_2, "V1")[:, :, 0] + f1_h_3 = tensor_space_FEM.evaluate_NN(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c1_3, "V1")[:, :, 0] # compute error error = 0.0 @@ -194,7 +194,7 @@ def l2_error_V1(tensor_space_FEM, domain, f1, c1, method="standard"): else: # compute error in each element - error = xp.zeros(Nel[:2], dtype=float) + error = np.zeros(Nel[:2], dtype=float) # 1 * d_f1 * G^11 * |det(DF)| * d_f1 ker.kernel_l2error( @@ -298,7 +298,7 @@ def l2_error_V1(tensor_space_FEM, domain, f1, c1, method="standard"): error = error.sum() - return xp.sqrt(error) + return np.sqrt(error) # ======= error in V2 ==================== @@ -314,7 +314,7 @@ def l2_error_V2(tensor_space_FEM, domain, f2, c2, method="standard"): domain : domain domain object defining the geometry - f2 : list of callables or xp.ndarrays + f2 : list of callables or np.ndarrays the three 2-form components with which the error shall be computed c2 : list of array_like @@ -354,16 +354,16 @@ def l2_error_V2(tensor_space_FEM, domain, f2, c2, method="standard"): # evaluation of exact 2-form components at quadrature points if callable(f2[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") f2_1 = f2[0](quad_mesh[0], quad_mesh[1], 0.0) f2_2 = f2[1](quad_mesh[0], quad_mesh[1], 0.0) f2_3 = f2[2](quad_mesh[0], quad_mesh[1], 0.0) if method == "standard": # evaluation of discrete 2-form components at quadrature points - f2_h_1 = tensor_space_FEM.evaluate_ND(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c2_1, "V2")[:, :, 0] - f2_h_2 = tensor_space_FEM.evaluate_DN(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c2_2, "V2")[:, :, 0] - f2_h_3 = tensor_space_FEM.evaluate_DD(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c2_3, "V2")[:, :, 0] + f2_h_1 = tensor_space_FEM.evaluate_ND(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c2_1, "V2")[:, :, 0] + f2_h_2 = tensor_space_FEM.evaluate_DN(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c2_2, "V2")[:, :, 0] + f2_h_3 = tensor_space_FEM.evaluate_DD(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c2_3, "V2")[:, :, 0] # compute error error = 0.0 @@ -386,7 +386,7 @@ def l2_error_V2(tensor_space_FEM, domain, f2, c2, method="standard"): else: # compute error in each element - error = xp.zeros(Nel[:2], dtype=float) + error = np.zeros(Nel[:2], dtype=float) # 1 * d_f1 * G_11 / |det(DF)| * d_f1 ker.kernel_l2error( @@ -490,7 +490,7 @@ def l2_error_V2(tensor_space_FEM, domain, f2, c2, method="standard"): error = error.sum() - return xp.sqrt(error) + return np.sqrt(error) # ======= error in V3 ==================== @@ -506,7 +506,7 @@ def 
l2_error_V3(tensor_space_FEM, domain, f3, c3, method="standard"): domain : domain domain object defining the geometry - f3 : callable or xp.ndarray + f3 : callable or np.ndarray the 3-form component with which the error shall be computed c3 : array_like @@ -544,12 +544,12 @@ def l2_error_V3(tensor_space_FEM, domain, f3, c3, method="standard"): # evaluation of exact 3-form at quadrature points if callable(f3): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), indexing="ij") f3 = f3(quad_mesh[0], quad_mesh[1], 0.0) if method == "standard": # evaluation of discrete 3-form at quadrature points - f3_h = tensor_space_FEM.evaluate_DD(pts[0].flatten(), pts[1].flatten(), xp.array([0.0]), c3, "V3")[:, :, 0] + f3_h = tensor_space_FEM.evaluate_DD(pts[0].flatten(), pts[1].flatten(), np.array([0.0]), c3, "V3")[:, :, 0] # compute error error = 0.0 @@ -559,7 +559,7 @@ def l2_error_V3(tensor_space_FEM, domain, f3, c3, method="standard"): else: # compute error in each element - error = xp.zeros(Nel[:2], dtype=float) + error = np.zeros(Nel[:2], dtype=float) ker.kernel_l2error( Nel, @@ -587,4 +587,4 @@ def l2_error_V3(tensor_space_FEM, domain, f3, c3, method="standard"): error = error.sum() - return xp.sqrt(error) + return np.sqrt(error) diff --git a/src/struphy/eigenvalue_solvers/legacy/l2_error_3d.py b/src/struphy/eigenvalue_solvers/legacy/l2_error_3d.py index 7553e3a83..39eac0b66 100644 --- a/src/struphy/eigenvalue_solvers/legacy/l2_error_3d.py +++ b/src/struphy/eigenvalue_solvers/legacy/l2_error_3d.py @@ -6,10 +6,10 @@ Modules to compute L2-errors of discrete p-forms with analytical forms in 3D. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_3d as ker +from struphy.utils.arrays import xp as np # ======= error in V0 ==================== @@ -25,7 +25,7 @@ def l2_error_V0(tensor_space_FEM, domain, fun, coeff): domain : domain domain object defining the geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 0-form with which the error shall be computed coeff : array_like @@ -54,16 +54,16 @@ def l2_error_V0(tensor_space_FEM, domain, fun, coeff): det_df = abs(domain.jacobian_det(pts[0].flatten(), pts[1].flatten(), pts[2].flatten())) # evaluation of given 0-form at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun(quad_mesh[0], quad_mesh[1], quad_mesh[2]) else: mat_f[:, :, :] = fun # compute error - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) ker.kernel_l2error( Nel, @@ -94,7 +94,7 @@ def l2_error_V0(tensor_space_FEM, domain, fun, coeff): det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]), ) - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) # ======= error in V1 ==================== @@ -110,7 +110,7 @@ def l2_error_V1(tensor_space_FEM, domain, fun, coeff): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the three 1-form components with which the error shall be computed coeff : list of array_like @@ -141,12 +141,12 @@ def l2_error_V1(tensor_space_FEM, domain, fun, coeff): 
metric_coeffs *= abs(domain.jacobian_det(pts[0].flatten(), pts[1].flatten(), pts[2].flatten())) # evaluation of given 1-form components at quadrature points - mat_f1 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) - mat_f2 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) - mat_f3 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f1 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f2 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f3 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f1[:, :, :] = fun[0](quad_mesh[0], quad_mesh[1], quad_mesh[2]) mat_f2[:, :, :] = fun[1](quad_mesh[0], quad_mesh[1], quad_mesh[2]) mat_f3[:, :, :] = fun[2](quad_mesh[0], quad_mesh[1], quad_mesh[2]) @@ -156,7 +156,7 @@ def l2_error_V1(tensor_space_FEM, domain, fun, coeff): mat_f3[:, :, :] = fun[2] # compute error - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) # 1 * f1 * G^11 * |det(DF)| * f1 ker.kernel_l2error( @@ -314,7 +314,7 @@ def l2_error_V1(tensor_space_FEM, domain, fun, coeff): 1 * metric_coeffs[2, 2].reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]), ) - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) # ======= error in V2 ==================== @@ -330,7 +330,7 @@ def l2_error_V2(tensor_space_FEM, domain, fun, coeff): domain : domain domain object defining the geometry - fun : list of callables or xp.ndarrays + fun : list of callables or np.ndarrays the three 2-form components with which the error shall be computed coeff : list of array_like @@ -361,12 +361,12 @@ def l2_error_V2(tensor_space_FEM, domain, fun, coeff): metric_coeffs /= abs(domain.jacobian_det(pts[0].flatten(), pts[1].flatten(), pts[2].flatten())) # evaluation of given 2-form components at quadrature points - mat_f1 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) - mat_f2 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) - mat_f3 = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f1 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f2 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f3 = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun[0]): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f1[:, :, :] = fun[0](quad_mesh[0], quad_mesh[1], quad_mesh[2]) mat_f2[:, :, :] = fun[1](quad_mesh[0], quad_mesh[1], quad_mesh[2]) mat_f3[:, :, :] = fun[2](quad_mesh[0], quad_mesh[1], quad_mesh[2]) @@ -376,7 +376,7 @@ def l2_error_V2(tensor_space_FEM, domain, fun, coeff): mat_f3[:, :, :] = fun[2] # compute error - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) # 1 * f1 * G_11 / |det(DF)| * f1 ker.kernel_l2error( @@ -534,7 +534,7 @@ def l2_error_V2(tensor_space_FEM, domain, fun, coeff): 1 * metric_coeffs[2, 2].reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]), ) - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) # ======= error in V3 ==================== @@ -550,7 +550,7 @@ def l2_error_V3(tensor_space_FEM, domain, fun, coeff): domain : domain domain object defining the 
geometry - fun : callable or xp.ndarray + fun : callable or np.ndarray the 3-form component with which the error shall be computed coeff : array_like @@ -579,16 +579,16 @@ def l2_error_V3(tensor_space_FEM, domain, fun, coeff): det_df = abs(domain.jacobian_det(pts[0].flatten(), pts[1].flatten(), pts[2].flatten())) # evaluation of given 3-form component at quadrature points - mat_f = xp.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) + mat_f = np.empty((pts[0].size, pts[1].size, pts[2].size), dtype=float) if callable(fun): - quad_mesh = xp.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") + quad_mesh = np.meshgrid(pts[0].flatten(), pts[1].flatten(), pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun(quad_mesh[0], quad_mesh[1], quad_mesh[2]) else: mat_f[:, :, :] = fun # compute error - error = xp.zeros(Nel, dtype=float) + error = np.zeros(Nel, dtype=float) ker.kernel_l2error( Nel, @@ -619,4 +619,4 @@ def l2_error_V3(tensor_space_FEM, domain, fun, coeff): 1 / det_df.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]), ) - return xp.sqrt(error.sum()) + return np.sqrt(error.sum()) diff --git a/src/struphy/eigenvalue_solvers/legacy/mass_matrices_3d_pre.py b/src/struphy/eigenvalue_solvers/legacy/mass_matrices_3d_pre.py index a46097a8f..673069c0e 100644 --- a/src/struphy/eigenvalue_solvers/legacy/mass_matrices_3d_pre.py +++ b/src/struphy/eigenvalue_solvers/legacy/mass_matrices_3d_pre.py @@ -6,11 +6,11 @@ Modules to obtain preconditioners for mass matrices in 3D. """ -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.spline_space as spl import struphy.linear_algebra.linalg_kron as linkron +from struphy.utils.arrays import xp as np # ================ inverse mass matrix in V0 =========================== @@ -32,9 +32,9 @@ def get_M0_PRE(tensor_space_FEM, domain): # spaces_pre[1].set_extraction_operators() # spaces_pre[2].set_extraction_operators() - spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * np.ones(eta.shape, dtype=float)) c_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] @@ -63,20 +63,20 @@ def get_M1_PRE(tensor_space_FEM, domain): # spaces_pre[1].set_extraction_operators() # spaces_pre[2].set_extraction_operators() - spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * 
np.ones(eta.shape, dtype=float)) - spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * np.ones(eta.shape, dtype=float)) c11_pre = [spaces_pre[0].M1.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] c22_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M1.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] c33_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M1.toarray()[:, 0]] def solve(x): - x1, x2, x3 = xp.split(x, 3) + x1, x2, x3 = np.split(x, 3) x1 = x1.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) x2 = x2.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) @@ -86,7 +86,7 @@ def solve(x): r2 = linkron.kron_fftsolve_3d(c22_pre, x2).flatten() r3 = linkron.kron_fftsolve_3d(c33_pre, x3).flatten() - return xp.concatenate((r1, r2, r3)) + return np.concatenate((r1, r2, r3)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.M1.shape, matvec=solve) @@ -110,20 +110,20 @@ def get_M2_PRE(tensor_space_FEM, domain): # spaces_pre[1].set_extraction_operators() # spaces_pre[2].set_extraction_operators() - spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: (domain.params[5] - domain.params[4]) * np.ones(eta.shape, dtype=float)) - spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * np.ones(eta.shape, dtype=float)) c11_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M1.toarray()[:, 0], spaces_pre[2].M1.toarray()[:, 0]] c22_pre = [spaces_pre[0].M1.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M1.toarray()[:, 0]] c33_pre = [spaces_pre[0].M1.toarray()[:, 0], spaces_pre[1].M1.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] def solve(x): - x1, x2, x3 = xp.split(x, 3) + x1, x2, x3 = np.split(x, 3) x1 = x1.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) x2 = 
x2.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) @@ -133,7 +133,7 @@ def solve(x): r2 = linkron.kron_fftsolve_3d(c22_pre, x2).flatten() r3 = linkron.kron_fftsolve_3d(c33_pre, x3).flatten() - return xp.concatenate((r1, r2, r3)) + return np.concatenate((r1, r2, r3)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.M2.shape, matvec=solve) @@ -157,9 +157,9 @@ def get_M3_PRE(tensor_space_FEM, domain): # spaces_pre[1].set_extraction_operators() # spaces_pre[2].set_extraction_operators() - spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M1(lambda eta: 1 / (domain.params[1] - domain.params[0]) * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M1(lambda eta: 1 / (domain.params[3] - domain.params[2]) * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M1(lambda eta: 1 / (domain.params[5] - domain.params[4]) * np.ones(eta.shape, dtype=float)) c_pre = [spaces_pre[0].M1.toarray()[:, 0], spaces_pre[1].M1.toarray()[:, 0], spaces_pre[2].M1.toarray()[:, 0]] @@ -188,26 +188,26 @@ def get_Mv_PRE(tensor_space_FEM, domain): # spaces_pre[1].set_extraction_operators() # spaces_pre[2].set_extraction_operators() - spaces_pre[0].assemble_M0(lambda eta: domain.params[0] ** 3 * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: domain.params[1] * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: domain.params[2] * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: domain.params[0] ** 3 * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: domain.params[1] * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: domain.params[2] * np.ones(eta.shape, dtype=float)) c11_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] - spaces_pre[0].assemble_M0(lambda eta: domain.params[0] * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: domain.params[1] ** 3 * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: domain.params[2] * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: domain.params[0] * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: domain.params[1] ** 3 * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: domain.params[2] * np.ones(eta.shape, dtype=float)) c22_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] - spaces_pre[0].assemble_M0(lambda eta: domain.params[0] * xp.ones(eta.shape, dtype=float)) - spaces_pre[1].assemble_M0(lambda eta: domain.params[1] * xp.ones(eta.shape, dtype=float)) - spaces_pre[2].assemble_M0(lambda eta: domain.params[2] ** 3 * xp.ones(eta.shape, dtype=float)) + spaces_pre[0].assemble_M0(lambda eta: domain.params[0] * np.ones(eta.shape, dtype=float)) + spaces_pre[1].assemble_M0(lambda eta: domain.params[1] * np.ones(eta.shape, dtype=float)) + spaces_pre[2].assemble_M0(lambda eta: domain.params[2] ** 3 * np.ones(eta.shape, dtype=float)) c33_pre = [spaces_pre[0].M0.toarray()[:, 0], spaces_pre[1].M0.toarray()[:, 0], spaces_pre[2].M0.toarray()[:, 0]] def solve(x): - x1, x2, x3 = xp.split(x, 3) + x1, x2, x3 = np.split(x, 3) 
x1 = x1.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) x2 = x2.reshape(Nel_pre[0], Nel_pre[1], Nel_pre[2]) @@ -217,7 +217,7 @@ def solve(x): r2 = linkron.kron_fftsolve_3d(c22_pre, x2).flatten() r3 = linkron.kron_fftsolve_3d(c33_pre, x3).flatten() - return xp.concatenate((r1, r2, r3)) + return np.concatenate((r1, r2, r3)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.Mv.shape, matvec=solve) @@ -273,18 +273,16 @@ def get_M1_PRE_3(tensor_space_FEM, mats_pol=None): def solve(x): x1 = x[: tensor_space_FEM.E1_pol_0.shape[0] * tensor_space_FEM.NbaseN[2]].reshape( - tensor_space_FEM.E1_pol_0.shape[0], - tensor_space_FEM.NbaseN[2], + tensor_space_FEM.E1_pol_0.shape[0], tensor_space_FEM.NbaseN[2] ) x2 = x[tensor_space_FEM.E1_pol_0.shape[0] * tensor_space_FEM.NbaseN[2] :].reshape( - tensor_space_FEM.E0_pol_0.shape[0], - tensor_space_FEM.NbaseD[2], + tensor_space_FEM.E0_pol_0.shape[0], tensor_space_FEM.NbaseD[2] ) r1 = linkron.kron_fftsolve_2d(M1_pol_0_11_LU, tor_vec0, x1).flatten() r2 = linkron.kron_fftsolve_2d(M1_pol_0_22_LU, tor_vec1, x2).flatten() - return xp.concatenate((r1, r2)) + return np.concatenate((r1, r2)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.M1_0.shape, matvec=solve) @@ -313,18 +311,16 @@ def get_M2_PRE_3(tensor_space_FEM, mats_pol=None): def solve(x): x1 = x[: tensor_space_FEM.E2_pol_0.shape[0] * tensor_space_FEM.NbaseD[2]].reshape( - tensor_space_FEM.E2_pol_0.shape[0], - tensor_space_FEM.NbaseD[2], + tensor_space_FEM.E2_pol_0.shape[0], tensor_space_FEM.NbaseD[2] ) x2 = x[tensor_space_FEM.E2_pol_0.shape[0] * tensor_space_FEM.NbaseD[2] :].reshape( - tensor_space_FEM.E3_pol_0.shape[0], - tensor_space_FEM.NbaseN[2], + tensor_space_FEM.E3_pol_0.shape[0], tensor_space_FEM.NbaseN[2] ) r1 = linkron.kron_fftsolve_2d(M2_pol_0_11_LU, tor_vec1, x1).flatten() r2 = linkron.kron_fftsolve_2d(M2_pol_0_22_LU, tor_vec0, x2).flatten() - return xp.concatenate((r1, r2)) + return np.concatenate((r1, r2)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.M2_0.shape, matvec=solve) @@ -377,17 +373,15 @@ def get_Mv_PRE_3(tensor_space_FEM, mats_pol=None): def solve(x): x1 = x[: tensor_space_FEM.Ev_pol_0.shape[0] * tensor_space_FEM.NbaseN[2]].reshape( - tensor_space_FEM.Ev_pol_0.shape[0], - tensor_space_FEM.NbaseN[2], + tensor_space_FEM.Ev_pol_0.shape[0], tensor_space_FEM.NbaseN[2] ) x2 = x[tensor_space_FEM.Ev_pol_0.shape[0] * tensor_space_FEM.NbaseN[2] :].reshape( - tensor_space_FEM.E0_pol.shape[0], - tensor_space_FEM.NbaseN[2], + tensor_space_FEM.E0_pol.shape[0], tensor_space_FEM.NbaseN[2] ) r1 = linkron.kron_fftsolve_2d(Mv_pol_0_11_LU, tor_vec0, x1).flatten() r2 = linkron.kron_fftsolve_2d(Mv_pol_0_22_LU, tor_vec0, x2).flatten() - return xp.concatenate((r1, r2)) + return np.concatenate((r1, r2)) return spa.linalg.LinearOperator(shape=tensor_space_FEM.Mv_0.shape, matvec=solve) diff --git a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_arrays.py b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_arrays.py index 65faf9209..8514d25fc 100644 --- a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_arrays.py +++ b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_arrays.py @@ -1,13 +1,13 @@ import time import timeit -import cunumpy as xp import scipy.sparse as spa from psydac.ddm.mpi import mpi as MPI import struphy.geometry.mappings_3d as mapping3d import struphy.geometry.mappings_3d_fast as mapping_fast import struphy.linear_algebra.linalg_kernels as linalg +from struphy.utils.arrays import xp as np class Temp_arrays: @@ -39,67 +39,67 @@ def 
__init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): self.Ntot_1form = TENSOR_SPACE_FEM.Ntot_1form self.Ntot_2form = TENSOR_SPACE_FEM.Ntot_2form - self.b1_old = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[0], dtype=float) - self.b2_old = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[1], dtype=float) - self.b3_old = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[2], dtype=float) + self.b1_old = np.empty(TENSOR_SPACE_FEM.Nbase_1form[0], dtype=float) + self.b2_old = np.empty(TENSOR_SPACE_FEM.Nbase_1form[1], dtype=float) + self.b3_old = np.empty(TENSOR_SPACE_FEM.Nbase_1form[2], dtype=float) - self.b1_iter = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[0], dtype=float) - self.b2_iter = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[1], dtype=float) - self.b3_iter = xp.empty(TENSOR_SPACE_FEM.Nbase_1form[2], dtype=float) + self.b1_iter = np.empty(TENSOR_SPACE_FEM.Nbase_1form[0], dtype=float) + self.b2_iter = np.empty(TENSOR_SPACE_FEM.Nbase_1form[1], dtype=float) + self.b3_iter = np.empty(TENSOR_SPACE_FEM.Nbase_1form[2], dtype=float) - self.temp_dft = xp.empty((3, 3), dtype=float) - self.temp_generate_weight1 = xp.empty(3, dtype=float) - self.temp_generate_weight2 = xp.empty(3, dtype=float) - self.temp_generate_weight3 = xp.empty(3, dtype=float) + self.temp_dft = np.empty((3, 3), dtype=float) + self.temp_generate_weight1 = np.empty(3, dtype=float) + self.temp_generate_weight2 = np.empty(3, dtype=float) + self.temp_generate_weight3 = np.empty(3, dtype=float) - self.zerosform_temp_long = xp.empty(TENSOR_SPACE_FEM.Ntot_0form, dtype=float) - self.oneform_temp1_long = xp.empty(TENSOR_SPACE_FEM.Ntot_1form[0], dtype=float) - self.oneform_temp2_long = xp.empty(TENSOR_SPACE_FEM.Ntot_1form[1], dtype=float) - self.oneform_temp3_long = xp.empty(TENSOR_SPACE_FEM.Ntot_1form[2], dtype=float) + self.zerosform_temp_long = np.empty(TENSOR_SPACE_FEM.Ntot_0form, dtype=float) + self.oneform_temp1_long = np.empty(TENSOR_SPACE_FEM.Ntot_1form[0], dtype=float) + self.oneform_temp2_long = np.empty(TENSOR_SPACE_FEM.Ntot_1form[1], dtype=float) + self.oneform_temp3_long = np.empty(TENSOR_SPACE_FEM.Ntot_1form[2], dtype=float) - self.oneform_temp_long = xp.empty( + self.oneform_temp_long = np.empty( TENSOR_SPACE_FEM.Ntot_1form[0] + TENSOR_SPACE_FEM.Ntot_1form[1] + TENSOR_SPACE_FEM.Ntot_1form[2], dtype=float, ) - self.twoform_temp1_long = xp.empty(TENSOR_SPACE_FEM.Ntot_2form[0], dtype=float) - self.twoform_temp2_long = xp.empty(TENSOR_SPACE_FEM.Ntot_2form[1], dtype=float) - self.twoform_temp3_long = xp.empty(TENSOR_SPACE_FEM.Ntot_2form[2], dtype=float) + self.twoform_temp1_long = np.empty(TENSOR_SPACE_FEM.Ntot_2form[0], dtype=float) + self.twoform_temp2_long = np.empty(TENSOR_SPACE_FEM.Ntot_2form[1], dtype=float) + self.twoform_temp3_long = np.empty(TENSOR_SPACE_FEM.Ntot_2form[2], dtype=float) - self.twoform_temp_long = xp.empty( + self.twoform_temp_long = np.empty( TENSOR_SPACE_FEM.Ntot_2form[0] + TENSOR_SPACE_FEM.Ntot_2form[1] + TENSOR_SPACE_FEM.Ntot_2form[2], dtype=float, ) - self.temp_twoform1 = xp.empty(TENSOR_SPACE_FEM.Nbase_2form[0], dtype=float) - self.temp_twoform2 = xp.empty(TENSOR_SPACE_FEM.Nbase_2form[1], dtype=float) - self.temp_twoform3 = xp.empty(TENSOR_SPACE_FEM.Nbase_2form[2], dtype=float) + self.temp_twoform1 = np.empty(TENSOR_SPACE_FEM.Nbase_2form[0], dtype=float) + self.temp_twoform2 = np.empty(TENSOR_SPACE_FEM.Nbase_2form[1], dtype=float) + self.temp_twoform3 = np.empty(TENSOR_SPACE_FEM.Nbase_2form[2], dtype=float) # arrays used to store intermediate values - self.form_0_flatten = xp.empty(self.Ntot_0form, dtype=float) +
self.form_0_flatten = np.empty(self.Ntot_0form, dtype=float) - self.form_1_1_flatten = xp.empty(self.Ntot_1form[0], dtype=float) - self.form_1_2_flatten = xp.empty(self.Ntot_1form[1], dtype=float) - self.form_1_3_flatten = xp.empty(self.Ntot_1form[2], dtype=float) + self.form_1_1_flatten = np.empty(self.Ntot_1form[0], dtype=float) + self.form_1_2_flatten = np.empty(self.Ntot_1form[1], dtype=float) + self.form_1_3_flatten = np.empty(self.Ntot_1form[2], dtype=float) - self.form_1_tot_flatten = xp.empty(self.Ntot_1form[0] + self.Ntot_1form[1] + self.Ntot_1form[2], dtype=float) + self.form_1_tot_flatten = np.empty(self.Ntot_1form[0] + self.Ntot_1form[1] + self.Ntot_1form[2], dtype=float) - self.form_2_1_flatten = xp.empty(self.Ntot_2form[0], dtype=float) - self.form_2_2_flatten = xp.empty(self.Ntot_2form[1], dtype=float) - self.form_2_3_flatten = xp.empty(self.Ntot_2form[2], dtype=float) + self.form_2_1_flatten = np.empty(self.Ntot_2form[0], dtype=float) + self.form_2_2_flatten = np.empty(self.Ntot_2form[1], dtype=float) + self.form_2_3_flatten = np.empty(self.Ntot_2form[2], dtype=float) - self.form_2_tot_flatten = xp.empty(self.Ntot_2form[0] + self.Ntot_2form[1] + self.Ntot_2form[2], dtype=float) + self.form_2_tot_flatten = np.empty(self.Ntot_2form[0] + self.Ntot_2form[1] + self.Ntot_2form[2], dtype=float) - self.bulkspeed_loc = xp.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) - self.temperature_loc = xp.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) - self.bulkspeed = xp.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) + self.bulkspeed_loc = np.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) + self.temperature_loc = np.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) + self.bulkspeed = np.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) if self.mpi_rank == 0: - temperature = xp.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) + temperature = np.zeros((3, self.Nel[0], self.Nel[1], self.Nel[2]), dtype=float) else: temperature = None # values of magnetic fields at all quadrature points - self.LO_inv = xp.empty( + self.LO_inv = np.empty( ( self.Nel[0], self.Nel[1], @@ -111,7 +111,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.LO_b1 = xp.empty( + self.LO_b1 = np.empty( ( self.Nel[0], self.Nel[1], @@ -122,7 +122,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_b2 = xp.empty( + self.LO_b2 = np.empty( ( self.Nel[0], self.Nel[1], @@ -133,7 +133,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_b3 = xp.empty( + self.LO_b3 = np.empty( ( self.Nel[0], self.Nel[1], @@ -145,7 +145,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) # values of weights (used in the linear operators) - self.LO_w1 = xp.empty( + self.LO_w1 = np.empty( ( self.Nel[0], self.Nel[1], @@ -156,7 +156,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_w2 = xp.empty( + self.LO_w2 = np.empty( ( self.Nel[0], self.Nel[1], @@ -167,7 +167,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_w3 = xp.empty( + self.LO_w3 = np.empty( ( self.Nel[0], self.Nel[1], @@ -179,7 +179,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) # values of a function (given its finite element coefficients) at all quadrature points - self.LO_r1 = xp.empty( + 
self.LO_r1 = np.empty( ( self.Nel[0], self.Nel[1], @@ -190,7 +190,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_r2 = xp.empty( + self.LO_r2 = np.empty( ( self.Nel[0], self.Nel[1], @@ -201,7 +201,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.LO_r3 = xp.empty( + self.LO_r3 = np.empty( ( self.Nel[0], self.Nel[1], @@ -213,7 +213,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) # values of determinant of Jacobi matrix of the map at all quadrature points - self.df_det = xp.empty( + self.df_det = np.empty( ( self.Nel[0], self.Nel[1], @@ -225,8 +225,8 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) # when using delta f method, the values of current equilibrium at all quadrature points - if control: - self.Jeqx = xp.empty( + if control == True: + self.Jeqx = np.empty( ( self.Nel[0], self.Nel[1], @@ -237,7 +237,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.Jeqy = xp.empty( + self.Jeqy = np.empty( ( self.Nel[0], self.Nel[1], @@ -248,7 +248,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.Jeqz = xp.empty( + self.Jeqz = np.empty( ( self.Nel[0], self.Nel[1], @@ -260,7 +260,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) # values of DF and inverse of DF at all quadrature points - self.DF_11 = xp.empty( + self.DF_11 = np.empty( ( self.Nel[0], self.Nel[1], @@ -271,7 +271,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_12 = xp.empty( + self.DF_12 = np.empty( ( self.Nel[0], self.Nel[1], @@ -282,7 +282,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_13 = xp.empty( + self.DF_13 = np.empty( ( self.Nel[0], self.Nel[1], @@ -293,7 +293,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_21 = xp.empty( + self.DF_21 = np.empty( ( self.Nel[0], self.Nel[1], @@ -304,7 +304,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_22 = xp.empty( + self.DF_22 = np.empty( ( self.Nel[0], self.Nel[1], @@ -315,7 +315,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_23 = xp.empty( + self.DF_23 = np.empty( ( self.Nel[0], self.Nel[1], @@ -326,7 +326,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_31 = xp.empty( + self.DF_31 = np.empty( ( self.Nel[0], self.Nel[1], @@ -337,7 +337,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_32 = xp.empty( + self.DF_32 = np.empty( ( self.Nel[0], self.Nel[1], @@ -348,7 +348,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DF_33 = xp.empty( + self.DF_33 = np.empty( ( self.Nel[0], self.Nel[1], @@ -360,7 +360,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.DFI_11 = xp.empty( + self.DFI_11 = np.empty( ( self.Nel[0], self.Nel[1], @@ -371,7 +371,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_12 = xp.empty( + self.DFI_12 = np.empty( ( self.Nel[0], self.Nel[1], @@ -382,7 +382,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_13 = xp.empty( + self.DFI_13 = np.empty( ( self.Nel[0], 
self.Nel[1], @@ -393,7 +393,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_21 = xp.empty( + self.DFI_21 = np.empty( ( self.Nel[0], self.Nel[1], @@ -404,7 +404,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_22 = xp.empty( + self.DFI_22 = np.empty( ( self.Nel[0], self.Nel[1], @@ -415,7 +415,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_23 = xp.empty( + self.DFI_23 = np.empty( ( self.Nel[0], self.Nel[1], @@ -426,7 +426,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_31 = xp.empty( + self.DFI_31 = np.empty( ( self.Nel[0], self.Nel[1], @@ -437,7 +437,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_32 = xp.empty( + self.DFI_32 = np.empty( ( self.Nel[0], self.Nel[1], @@ -448,7 +448,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFI_33 = xp.empty( + self.DFI_33 = np.empty( ( self.Nel[0], self.Nel[1], @@ -460,7 +460,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.DFIT_11 = xp.empty( + self.DFIT_11 = np.empty( ( self.Nel[0], self.Nel[1], @@ -471,7 +471,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_12 = xp.empty( + self.DFIT_12 = np.empty( ( self.Nel[0], self.Nel[1], @@ -482,7 +482,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_13 = xp.empty( + self.DFIT_13 = np.empty( ( self.Nel[0], self.Nel[1], @@ -493,7 +493,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_21 = xp.empty( + self.DFIT_21 = np.empty( ( self.Nel[0], self.Nel[1], @@ -504,7 +504,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_22 = xp.empty( + self.DFIT_22 = np.empty( ( self.Nel[0], self.Nel[1], @@ -515,7 +515,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_23 = xp.empty( + self.DFIT_23 = np.empty( ( self.Nel[0], self.Nel[1], @@ -526,7 +526,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_31 = xp.empty( + self.DFIT_31 = np.empty( ( self.Nel[0], self.Nel[1], @@ -537,7 +537,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_32 = xp.empty( + self.DFIT_32 = np.empty( ( self.Nel[0], self.Nel[1], @@ -548,7 +548,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.DFIT_33 = xp.empty( + self.DFIT_33 = np.empty( ( self.Nel[0], self.Nel[1], @@ -560,7 +560,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.G_inv_11 = xp.empty( + self.G_inv_11 = np.empty( ( self.Nel[0], self.Nel[1], @@ -571,7 +571,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.G_inv_12 = xp.empty( + self.G_inv_12 = np.empty( ( self.Nel[0], self.Nel[1], @@ -582,7 +582,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.G_inv_13 = xp.empty( + self.G_inv_13 = np.empty( ( self.Nel[0], self.Nel[1], @@ -594,7 +594,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.G_inv_22 = xp.empty( + self.G_inv_22 = np.empty( ( self.Nel[0], self.Nel[1], @@ -605,7 +605,7 @@ def __init__(self, 
TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): ), dtype=float, ) - self.G_inv_23 = xp.empty( + self.G_inv_23 = np.empty( ( self.Nel[0], self.Nel[1], @@ -617,7 +617,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.G_inv_33 = xp.empty( + self.G_inv_33 = np.empty( ( self.Nel[0], self.Nel[1], @@ -629,7 +629,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): dtype=float, ) - self.temp_particle = xp.empty(3, dtype=float) + self.temp_particle = np.empty(3, dtype=float) # initialization of DF and its inverse # ================ for mapping evaluation ================== # spline degrees @@ -638,34 +638,34 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): pf3 = DOMAIN.p[2] # pf + 1 non-vanishing basis functions up to degree pf - b1f = xp.empty((pf1 + 1, pf1 + 1), dtype=float) - b2f = xp.empty((pf2 + 1, pf2 + 1), dtype=float) - b3f = xp.empty((pf3 + 1, pf3 + 1), dtype=float) + b1f = np.empty((pf1 + 1, pf1 + 1), dtype=float) + b2f = np.empty((pf2 + 1, pf2 + 1), dtype=float) + b3f = np.empty((pf3 + 1, pf3 + 1), dtype=float) # left and right values for spline evaluation - l1f = xp.empty(pf1, dtype=float) - l2f = xp.empty(pf2, dtype=float) - l3f = xp.empty(pf3, dtype=float) + l1f = np.empty(pf1, dtype=float) + l2f = np.empty(pf2, dtype=float) + l3f = np.empty(pf3, dtype=float) - r1f = xp.empty(pf1, dtype=float) - r2f = xp.empty(pf2, dtype=float) - r3f = xp.empty(pf3, dtype=float) + r1f = np.empty(pf1, dtype=float) + r2f = np.empty(pf2, dtype=float) + r3f = np.empty(pf3, dtype=float) # scaling arrays for M-splines - d1f = xp.empty(pf1, dtype=float) - d2f = xp.empty(pf2, dtype=float) - d3f = xp.empty(pf3, dtype=float) + d1f = np.empty(pf1, dtype=float) + d2f = np.empty(pf2, dtype=float) + d3f = np.empty(pf3, dtype=float) # pf + 1 derivatives - der1f = xp.empty(pf1 + 1, dtype=float) - der2f = xp.empty(pf2 + 1, dtype=float) - der3f = xp.empty(pf3 + 1, dtype=float) + der1f = np.empty(pf1 + 1, dtype=float) + der2f = np.empty(pf2 + 1, dtype=float) + der3f = np.empty(pf3 + 1, dtype=float) # needed mapping quantities - df = xp.empty((3, 3), dtype=float) - fx = xp.empty(3, dtype=float) - ginv = xp.empty((3, 3), dtype=float) - dfinv = xp.empty((3, 3), dtype=float) + df = np.empty((3, 3), dtype=float) + fx = np.empty(3, dtype=float) + ginv = np.empty((3, 3), dtype=float) + dfinv = np.empty((3, 3), dtype=float) for ie1 in range(self.Nel[0]): for ie2 in range(self.Nel[1]): @@ -761,7 +761,7 @@ def __init__(self, TENSOR_SPACE_FEM, DOMAIN, control, mpi_comm): self.df_det[ie1, ie2, ie3, q1, q2, q3] = det_number - if control: + if control == True: x1 = mapping3d.f( TENSOR_SPACE_FEM.pts[0][ie1, q1], TENSOR_SPACE_FEM.pts[1][ie2, q2], diff --git a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_bv_kernel.py b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_bv_kernel.py index e477940e6..5cf3830a4 100644 --- a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_bv_kernel.py +++ b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_bv_kernel.py @@ -226,9 +226,7 @@ def right_hand2( * bd2[ie2, il2, 0, q2] * bd3[ie3, il3, 0, q3] * temp_vector_1[ - N_index_x[ie1, il1], - D_index_y[ie2, il2], - D_index_z[ie3, il3], + N_index_x[ie1, il1], D_index_y[ie2, il2], D_index_z[ie3, il3] ] ) @@ -254,9 +252,7 @@ def right_hand2( * bn2[ie2, il2, 0, q2] * bd3[ie3, il3, 0, q3] * temp_vector_2[ - D_index_x[ie1, il1], - N_index_y[ie2, il2], - D_index_z[ie3, il3], + D_index_x[ie1, il1], N_index_y[ie2, il2], D_index_z[ie3, il3] ] ) @@
-282,9 +278,7 @@ def right_hand2( * bd2[ie2, il2, 0, q2] * bn3[ie3, il3, 0, q3] * temp_vector_3[ - D_index_x[ie1, il1], - D_index_y[ie2, il2], - N_index_z[ie3, il3], + D_index_x[ie1, il1], D_index_y[ie2, il2], N_index_z[ie3, il3] ] ) @@ -344,9 +338,7 @@ def right_hand1( * bn2[ie2, il2, 0, q2] * bn3[ie3, il3, 0, q3] * temp_vector_1[ - D_index_x[ie1, il1], - N_index_y[ie2, il2], - N_index_z[ie3, il3], + D_index_x[ie1, il1], N_index_y[ie2, il2], N_index_z[ie3, il3] ] ) @@ -372,9 +364,7 @@ def right_hand1( * bd2[ie2, il2, 0, q2] * bn3[ie3, il3, 0, q3] * temp_vector_2[ - N_index_x[ie1, il1], - D_index_y[ie2, il2], - N_index_z[ie3, il3], + N_index_x[ie1, il1], D_index_y[ie2, il2], N_index_z[ie3, il3] ] ) @@ -400,9 +390,7 @@ def right_hand1( * bn2[ie2, il2, 0, q2] * bd3[ie3, il3, 0, q3] * temp_vector_3[ - N_index_x[ie1, il1], - N_index_y[ie2, il2], - D_index_z[ie3, il3], + N_index_x[ie1, il1], N_index_y[ie2, il2], D_index_z[ie3, il3] ] ) diff --git a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_massless_linear_operators.py b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_massless_linear_operators.py index bdf01bf8b..843952a7b 100644 --- a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_massless_linear_operators.py +++ b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_massless_linear_operators.py @@ -1,11 +1,11 @@ import time -import cunumpy as xp import scipy.sparse as spa import struphy.feec.massless_operators.fB_bb_kernel as bb_kernel import struphy.feec.massless_operators.fB_bv_kernel as bv_kernel import struphy.feec.massless_operators.fB_vv_kernel as vv_kernel +from struphy.utils.arrays import xp as np class Massless_linear_operators: @@ -49,15 +49,15 @@ def linearoperator_step_vv(self, M2_PRE, M2, M1_PRE, M1, TEMP, ACC_VV): This function is used in substep vv with L2 projector. 
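The three 1-form coefficient blocks are handled as one long vector: concatenate, solve the preconditioned mass-matrix system on the long vector, then split the solution back into its components. A minimal sketch of that round trip, with hypothetical block sizes n1, n2, n3 and an identity stand-in for the mass matrix M1 (the tol and M=M1_PRE arguments passed below are omitted):

    import numpy as np
    import scipy.sparse as spa
    import scipy.sparse.linalg as sla

    n1 = n2 = n3 = 4
    M1 = spa.identity(n1 + n2 + n3, format="csr")
    b = np.concatenate((np.ones(n1), 2 * np.ones(n2), 3 * np.ones(n3)))
    x = sla.cg(M1, b)[0]                     # solve M1 x = b iteratively
    x1, x2, x3 = np.split(x, [n1, n1 + n2])  # recover the three blocks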
""" - dft = xp.empty((3, 3), dtype=float) - generate_weight1 = xp.zeros(3, dtype=float) - generate_weight2 = xp.zeros(3, dtype=float) - generate_weight3 = xp.zeros(3, dtype=float) + dft = np.empty((3, 3), dtype=float) + generate_weight1 = np.zeros(3, dtype=float) + generate_weight2 = np.zeros(3, dtype=float) + generate_weight3 = np.zeros(3, dtype=float) # =========================inverse of M1 =========================== - ACC_VV.temp1[:], ACC_VV.temp2[:], ACC_VV.temp3[:] = xp.split( + ACC_VV.temp1[:], ACC_VV.temp2[:], ACC_VV.temp3[:] = np.split( spa.linalg.cg( M1, - 1.0 / self.Np * xp.concatenate((ACC_VV.vec1.flatten(), ACC_VV.vec2.flatten(), ACC_VV.vec3.flatten())), + 1.0 / self.Np * np.concatenate((ACC_VV.vec1.flatten(), ACC_VV.vec2.flatten(), ACC_VV.vec3.flatten())), tol=10 ** (-14), M=M1_PRE, )[0], @@ -153,10 +153,10 @@ def linearoperator_step_vv(self, M2_PRE, M2, M1_PRE, M1, TEMP, ACC_VV): ) # =========================inverse of M1 =========================== - ACC_VV.temp1[:], ACC_VV.temp2[:], ACC_VV.temp3[:] = xp.split( + ACC_VV.temp1[:], ACC_VV.temp2[:], ACC_VV.temp3[:] = np.split( spa.linalg.cg( M1, - xp.concatenate((ACC_VV.one_form1.flatten(), ACC_VV.one_form2.flatten(), ACC_VV.one_form3.flatten())), + np.concatenate((ACC_VV.one_form1.flatten(), ACC_VV.one_form2.flatten(), ACC_VV.one_form3.flatten())), tol=10 ** (-14), M=M1_PRE, )[0], @@ -310,10 +310,10 @@ def linearoperator_pre_step_vv( indN = tensor_space_FEM.indN indD = tensor_space_FEM.indD - dft = xp.empty((3, 3), dtype=float) - generate_weight1 = xp.zeros(3, dtype=float) - generate_weight2 = xp.zeros(3, dtype=float) - generate_weight3 = xp.zeros(3, dtype=float) + dft = np.empty((3, 3), dtype=float) + generate_weight1 = np.zeros(3, dtype=float) + generate_weight2 = np.zeros(3, dtype=float) + generate_weight3 = np.zeros(3, dtype=float) vv_kernel.prepre( indN[0], @@ -716,17 +716,16 @@ def linearoperator_step3( Ntot_2form = tensor_space_FEM.Ntot_2form Nbase_2form = tensor_space_FEM.Nbase_2form - dft = xp.empty((3, 3), dtype=float) - generate_weight1 = xp.empty(3, dtype=float) - generate_weight2 = xp.empty(3, dtype=float) - generate_weight3 = xp.empty(3, dtype=float) + dft = np.empty((3, 3), dtype=float) + generate_weight1 = np.empty(3, dtype=float) + generate_weight2 = np.empty(3, dtype=float) + generate_weight3 = np.empty(3, dtype=float) # ================================================================== # ========================= C =========================== # time1 = time.time() - twoform_temp1_long[:], twoform_temp2_long[:], twoform_temp3_long[:] = xp.split( - tensor_space_FEM.C.dot(input_vector), - [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], + twoform_temp1_long[:], twoform_temp2_long[:], twoform_temp3_long[:] = np.split( + tensor_space_FEM.C.dot(input_vector), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]] ) temp_vector_1[:, :, :] = twoform_temp1_long.reshape(Nbase_2form[0]) temp_vector_2[:, :, :] = twoform_temp2_long.reshape(Nbase_2form[1]) @@ -827,7 +826,7 @@ def linearoperator_step3( # ========================= C.T =========================== # time1 = time.time() temp_final = tensor_space_FEM.M1.dot(input_vector) - dt / 2.0 * tensor_space_FEM.C.T.dot( - xp.concatenate((temp_vector_1.flatten(), temp_vector_2.flatten(), temp_vector_3.flatten())), + np.concatenate((temp_vector_1.flatten(), temp_vector_2.flatten(), temp_vector_3.flatten())) ) # time2 = time.time() # print('second_curl_time', time2 - time1) @@ -922,16 +921,15 @@ def linearoperator_right_step3( Ntot_2form = tensor_space_FEM.Ntot_2form Nbase_2form = 
tensor_space_FEM.Nbase_2form - dft = xp.empty((3, 3), dtype=float) - generate_weight1 = xp.empty(3, dtype=float) - generate_weight2 = xp.empty(3, dtype=float) - generate_weight3 = xp.empty(3, dtype=float) + dft = np.empty((3, 3), dtype=float) + generate_weight1 = np.empty(3, dtype=float) + generate_weight2 = np.empty(3, dtype=float) + generate_weight3 = np.empty(3, dtype=float) # ================================================================== # ========================= C =========================== - twoform_temp1_long[:], twoform_temp2_long[:], twoform_temp3_long[:] = xp.split( - tensor_space_FEM.C.dot(input_vector), - [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], + twoform_temp1_long[:], twoform_temp2_long[:], twoform_temp3_long[:] = np.split( + tensor_space_FEM.C.dot(input_vector), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]] ) temp_vector_1[:, :, :] = twoform_temp1_long.reshape(Nbase_2form[0]) temp_vector_2[:, :, :] = twoform_temp2_long.reshape(Nbase_2form[1]) @@ -1084,7 +1082,7 @@ def linearoperator_right_step3( # print('final_bb', time2 - time1) # ========================= C.T =========================== temp_final = tensor_space_FEM.M1.dot(input_vector) + dt / 2.0 * tensor_space_FEM.C.T.dot( - xp.concatenate((temp_vector_1.flatten(), temp_vector_2.flatten(), temp_vector_3.flatten())), + np.concatenate((temp_vector_1.flatten(), temp_vector_2.flatten(), temp_vector_3.flatten())) ) return temp_final @@ -1147,9 +1145,8 @@ def substep4_linear_operator( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - tensor_space_FEM.C.dot(input), - [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], + acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + tensor_space_FEM.C.dot(input), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]] ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) acc.twoform_temp2[:, :, :] = acc.twoform_temp2_long.reshape(Nbase_2form[1]) @@ -1252,12 +1249,12 @@ def substep4_linear_operator( ) acc.oneform_temp_long[:] = spa.linalg.gmres( M1, - xp.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), + np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), tol=10 ** (-10), M=M1_PRE, )[0] - acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = xp.split( + acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = np.split( spa.linalg.gmres(M1, mat.dot(acc.oneform_temp_long), tol=10 ** (-10), M=M1_PRE)[0], [Ntot_1form[0], Ntot_1form[0] + Ntot_1form[1]], ) @@ -1362,7 +1359,7 @@ def substep4_linear_operator( ) return M1.dot(input) + dt**2 / 4.0 * tensor_space_FEM.C.T.dot( - xp.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())), + np.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())) ) # ========================================================================================================== @@ -1425,8 +1422,8 @@ def substep4_linear_operator_right( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - CURL.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), + 
acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + CURL.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) @@ -1532,13 +1529,13 @@ def substep4_linear_operator_right( acc.oneform_temp_long[:] = mat.dot( spa.linalg.gmres( M1, - xp.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), + np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), tol=10 ** (-10), M=M1_PRE, - )[0], + )[0] ) - acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = xp.split( + acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = np.split( spa.linalg.gmres(M1, dt**2.0 / 4.0 * acc.oneform_temp_long + dt * vec, tol=10 ** (-10), M=M1_PRE)[0], [Ntot_1form[0], Ntot_1form[0] + Ntot_1form[1]], ) @@ -1642,8 +1639,8 @@ def substep4_linear_operator_right( tensor_space_FEM.basisD[2], ) - return M1.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))) - CURL.T.dot( - xp.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())), + return M1.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))) - CURL.T.dot( + np.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())) ) # ========================================================================================================== @@ -1795,8 +1792,8 @@ def substep4_pusher_field( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - CURL.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), + acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + CURL.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) @@ -1901,7 +1898,7 @@ def substep4_pusher_field( return spa.linalg.cg( M1, - xp.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), + np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), tol=10 ** (-13), M=M1_PRE, )[0] @@ -1963,9 +1960,8 @@ def substep4_localproj_linear_operator( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - tensor_space_FEM.C.dot(input), - [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], + acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + tensor_space_FEM.C.dot(input), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]] ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) acc.twoform_temp2[:, :, :] = acc.twoform_temp2_long.reshape(Nbase_2form[1]) @@ -2067,9 +2063,9 @@ def substep4_localproj_linear_operator( tensor_space_FEM.basisD[2], ) - acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = xp.split( + acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = np.split( mat.dot( - xp.concatenate((acc.oneform_temp1.flatten(), 
acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), + np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())) ), [Ntot_1form[0], Ntot_1form[0] + Ntot_1form[1]], ) @@ -2175,7 +2171,7 @@ def substep4_localproj_linear_operator( ) return M1.dot(input) + dt**2 / 4.0 * tensor_space_FEM.C.T.dot( - xp.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())), + np.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())) ) # ========================================================================================================== @@ -2238,8 +2234,8 @@ def substep4_localproj_linear_operator_right( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - CURL.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), + acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + CURL.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) @@ -2343,12 +2339,11 @@ def substep4_localproj_linear_operator_right( tensor_space_FEM.basisD[2], ) acc.oneform_temp_long[:] = mat.dot( - xp.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())), + np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())) ) - acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = xp.split( - (dt**2.0 / 4.0 * acc.oneform_temp_long + dt * vec), - [Ntot_1form[0], Ntot_1form[0] + Ntot_1form[1]], + acc.oneform_temp1_long[:], acc.oneform_temp2_long[:], acc.oneform_temp3_long[:] = np.split( + (dt**2.0 / 4.0 * acc.oneform_temp_long + dt * vec), [Ntot_1form[0], Ntot_1form[0] + Ntot_1form[1]] ) acc.oneform_temp1[:, :, :] = acc.oneform_temp1_long.reshape(Nbase_1form[0]) @@ -2451,8 +2446,8 @@ def substep4_localproj_linear_operator_right( tensor_space_FEM.basisD[2], ) - return M1.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))) - CURL.T.dot( - xp.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())), + return M1.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))) - CURL.T.dot( + np.concatenate((acc.twoform_temp1.flatten(), acc.twoform_temp2.flatten(), acc.twoform_temp3.flatten())) ) # ========================================================================================================== @@ -2514,8 +2509,8 @@ def substep4_localproj_pusher_field( wts = tensor_space_FEM.wts # global quadrature weights # ========================================== - acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = xp.split( - CURL.dot(xp.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), + acc.twoform_temp1_long[:], acc.twoform_temp2_long[:], acc.twoform_temp3_long[:] = np.split( + CURL.dot(np.concatenate((bb1.flatten(), bb2.flatten(), bb3.flatten()))), [Ntot_2form[0], Ntot_2form[0] + Ntot_2form[1]], ) acc.twoform_temp1[:, :, :] = acc.twoform_temp1_long.reshape(Nbase_2form[0]) @@ -2618,4 +2613,4 @@ def substep4_localproj_pusher_field( tensor_space_FEM.basisD[2], ) - return xp.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), 
acc.oneform_temp3.flatten())) + return np.concatenate((acc.oneform_temp1.flatten(), acc.oneform_temp2.flatten(), acc.oneform_temp3.flatten())) diff --git a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_vv_kernel.py b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_vv_kernel.py index 1ef3376dc..216103640 100644 --- a/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_vv_kernel.py +++ b/src/struphy/eigenvalue_solvers/legacy/massless_operators/fB_vv_kernel.py @@ -1,9 +1,9 @@ -import cunumpy as xp from numpy import empty, exp, floor, zeros import struphy.bsplines.bsplines_kernels as bsp import struphy.geometry.mappings_kernels as mapping_fast import struphy.linear_algebra.linalg_kernels as linalg +from struphy.utils.arrays import xp as np # ========================================================================================== @@ -542,8 +542,7 @@ def piecewise_gather( for q2 in range(n_quad[1]): for q3 in range(n_quad[2]): temp1[0] = (cell_left[0] + il1) / Nel[0] + pts1[ - 0, - q1, + 0, q1 ] # quadrature points in the cell x direction temp4[0] = abs(temp1[0] - eta1) - compact[0] / 2.0 # if > 0, result is 0 @@ -742,8 +741,7 @@ def piecewise_scatter( for q2 in range(n_quad[1]): for q3 in range(n_quad[2]): temp1[0] = (cell_left[0] + il1) / Nel[0] + pts1[ - 0, - q1, + 0, q1 ] # quadrature points in the cell x direction temp4[0] = abs(temp1[0] - eta1) - compact[0] / 2 # if > 0, result is 0 diff --git a/src/struphy/eigenvalue_solvers/legacy/mhd_operators_MF.py b/src/struphy/eigenvalue_solvers/legacy/mhd_operators_MF.py index 560314458..9591fe06b 100644 --- a/src/struphy/eigenvalue_solvers/legacy/mhd_operators_MF.py +++ b/src/struphy/eigenvalue_solvers/legacy/mhd_operators_MF.py @@ -1,9 +1,9 @@ -import cunumpy as xp import scipy.sparse as spa from struphy.eigenvalue_solvers.projectors_global import Projectors_tensor_3d from struphy.eigenvalue_solvers.spline_space import Tensor_spline_space from struphy.linear_algebra.linalg_kron import kron_matvec_3d, kron_solve_3d +from struphy.utils.arrays import xp as np # ================================================================================================= @@ -107,9 +107,9 @@ def __init__(self, space, eq_MHD): self.pts1_D_2 = self.space.spaces[1].projectors.D_pts self.pts1_D_3 = self.space.spaces[2].projectors.D_pts - # assert xp.allclose(self.N_1.toarray(), self.pts0_N_1.toarray(), atol=1e-14) - # assert xp.allclose(self.N_2.toarray(), self.pts0_N_2.toarray(), atol=1e-14) - # assert xp.allclose(self.N_3.toarray(), self.pts0_N_3.toarray(), atol=1e-14) + # assert np.allclose(self.N_1.toarray(), self.pts0_N_1.toarray(), atol=1e-14) + # assert np.allclose(self.N_2.toarray(), self.pts0_N_2.toarray(), atol=1e-14) + # assert np.allclose(self.N_3.toarray(), self.pts0_N_3.toarray(), atol=1e-14) # ===== call equilibrium_mhd values at the projection points ===== # projection points @@ -210,11 +210,11 @@ def __init__(self, space, eq_MHD): # # Operator A # if self.basis_u == 1: # self.A = spa.linalg.LinearOperator((self.dim_1, self.dim_1), matvec = lambda x : (self.M1.dot(self.W1_dot(x)) + self.transpose_W1_dot(self.M1.dot(x))) / 2 ) - # self.A_mat = spa.csc_matrix(self.A.dot(xp.identity(self.dim_1))) + # self.A_mat = spa.csc_matrix(self.A.dot(np.identity(self.dim_1))) # elif self.basis_u == 2: # self.A = spa.linalg.LinearOperator((self.dim_2, self.dim_2), matvec = lambda x : (self.M2.dot(self.Q2_dot(x)) + self.transpose_Q2_dot(self.M2.dot(x))) / 2 ) - # self.A_mat = spa.csc_matrix(self.A.dot(xp.identity(self.dim_2))) + # 
self.A_mat = spa.csc_matrix(self.A.dot(np.identity(self.dim_2))) # self.A_inv = spa.linalg.inv(self.A_mat) @@ -228,12 +228,12 @@ def Q1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -320,7 +320,7 @@ def Q1_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ================================================================== def transpose_Q1_dot(self, x): @@ -329,12 +329,12 @@ def transpose_Q1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -403,7 +403,7 @@ def transpose_Q1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # =================================================================== def W1_dot(self, x): @@ -412,12 +412,12 @@ def W1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -482,7 +482,7 @@ def W1_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # =================================================================== def transpose_W1_dot(self, x): @@ -491,12 +491,12 @@ def transpose_W1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -545,7 +545,7 @@ def transpose_W1_dot(self, x): res_2 = kron_matvec_3d([self.pts0_N_1.T, self.pts1_D_2.T, self.pts0_N_3.T], mat_f_2_c) res_3 = kron_matvec_3d([self.pts0_N_1.T, self.pts0_N_2.T, self.pts1_D_3.T], mat_f_3_c) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def U1_dot(self, x): @@ -554,12 +554,12 @@ def U1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -645,7 +645,7 @@ def U1_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_U1_dot(self, x): @@ -654,12 +654,12 @@ def transpose_U1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -728,7 +728,7 @@ def transpose_U1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def P1_dot(self, x): @@ -737,12 +737,12 @@ def P1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -831,7 +831,7 @@ def P1_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_P1_dot(self, x): @@ -840,12 +840,12 @@ def transpose_P1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -914,7 +914,7 @@ def transpose_P1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def S1_dot(self, x): @@ -923,12 +923,12 @@ def S1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -1015,7 +1015,7 @@ def S1_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_S1_dot(self, x): @@ -1024,12 +1024,12 @@ def transpose_S1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -1098,7 +1098,7 @@ def transpose_S1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # =================================================================== def S10_dot(self, x): @@ -1107,12 +1107,12 @@ def S10_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -1178,7 +1178,7 @@ def S10_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # =================================================================== def transpose_S10_dot(self, x): @@ -1187,12 +1187,12 @@ def transpose_S10_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -1241,7 +1241,7 @@ def transpose_S10_dot(self, x): res_2 = kron_matvec_3d([self.pts0_N_1.T, self.pts1_D_2.T, self.pts0_N_3.T], mat_f_2_c) res_3 = kron_matvec_3d([self.pts0_N_1.T, self.pts0_N_2.T, self.pts1_D_3.T], mat_f_3_c) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ================================================================= def K1_dot(self, x): @@ -1250,12 +1250,12 @@ def K1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^3} Returns ---------- - res : xp.array + res : np.array dim R^{N^3} Notes @@ -1307,7 +1307,7 @@ def transpose_K1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^3} Returns @@ -1350,12 +1350,12 @@ def K10_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^0} Returns ---------- - res : xp.array + res : np.array dim R^{N^0} Notes @@ -1406,7 +1406,7 @@ def transpose_K10_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^0} Returns @@ -1449,12 +1449,12 @@ def T1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -1543,7 +1543,7 @@ def T1_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ================================================================= def transpose_T1_dot(self, x): @@ -1552,12 +1552,12 @@ def transpose_T1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -1626,7 +1626,7 @@ def transpose_T1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ================================================================= def X1_dot(self, x): @@ -1635,13 +1635,13 @@ def X1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^1} Returns ---------- res : list - 3 xp.arrays of dim R^{N^0} + 3 np.arrays of dim R^{N^0} Notes ----- @@ -1718,12 +1718,12 @@ def transpose_X1_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^0 x 3} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -1738,9 +1738,9 @@ def transpose_X1_dot(self, x): # x dim check # x should be R{N^0 * 3} # assert len(x) == self.space.Ntot_0form * 3 - # x_loc_1 = self.space.extract_0(xp.split(x,3)[0]) - # x_loc_2 = self.space.extract_0(xp.split(x,3)[1]) - # x_loc_3 = self.space.extract_0(xp.split(x,3)[2]) + # x_loc_1 = self.space.extract_0(np.split(x,3)[0]) + # x_loc_2 = self.space.extract_0(np.split(x,3)[1]) + # x_loc_3 = self.space.extract_0(np.split(x,3)[2]) # x_loc = list((x_loc_1, x_loc_2, x_loc_3)) x_loc_1 = self.space.extract_0(x[0]) @@ -1794,7 +1794,7 @@ def transpose_X1_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) ######################################## ########## 2-form formulation ########## @@ -1806,12 +1806,12 @@ def Q2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -1882,7 +1882,7 @@ def Q2_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_Q2_dot(self, x): @@ -1891,12 +1891,12 @@ def transpose_Q2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -1946,7 +1946,7 @@ def transpose_Q2_dot(self, x): res_2 = kron_matvec_3d([self.pts1_D_1.T, self.pts0_N_2.T, self.pts1_D_3.T], mat_f_2_c) res_3 = kron_matvec_3d([self.pts1_D_1.T, self.pts1_D_2.T, self.pts0_N_3.T], mat_f_3_c) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def T2_dot(self, x): @@ -1955,12 +1955,12 @@ def T2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -2049,7 +2049,7 @@ def T2_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_T2_dot(self, x): @@ -2058,12 +2058,12 @@ def transpose_T2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -2132,7 +2132,7 @@ def transpose_T2_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def P2_dot(self, x): @@ -2141,12 +2141,12 @@ def P2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -2235,7 +2235,7 @@ def P2_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_P2_dot(self, x): @@ -2244,12 +2244,12 @@ def transpose_P2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -2317,7 +2317,7 @@ def transpose_P2_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def S2_dot(self, x): @@ -2326,12 +2326,12 @@ def S2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^2} Notes @@ -2402,7 +2402,7 @@ def S2_dot(self, x): # xi3 : histo(xi1)-histo(xi2)-inter(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("23", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_S2_dot(self, x): @@ -2411,12 +2411,12 @@ def transpose_S2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -2466,7 +2466,7 @@ def transpose_S2_dot(self, x): res_2 = kron_matvec_3d([self.pts1_D_1.T, self.pts0_N_2.T, self.pts1_D_3.T], mat_f_2_c) res_3 = kron_matvec_3d([self.pts1_D_1.T, self.pts1_D_2.T, self.pts0_N_3.T], mat_f_3_c) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def K2_dot(self, x): @@ -2475,12 +2475,12 @@ def K2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^3} Returns ---------- - res : xp.array + res : np.array dim R^{N^3} Notes @@ -2532,7 +2532,7 @@ def transpose_K2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^3} Returns @@ -2575,13 +2575,13 @@ def X2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- res : list - 3 xp.arrays of dim R^{N^0} + 3 np.arrays of dim R^{N^0} Notes ----- @@ -2658,12 +2658,12 @@ def transpose_X2_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^0 x 3} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -2678,9 +2678,9 @@ def transpose_X2_dot(self, x): # x dim check # x should be R{N^0 * 3} # assert len(x) == self.space.Ntot_0form * 3 - # x_loc_1 = self.space.extract_0(xp.split(x,3)[0]) - # x_loc_2 = self.space.extract_0(xp.split(x,3)[1]) - # x_loc_3 = self.space.extract_0(xp.split(x,3)[2]) + # x_loc_1 = self.space.extract_0(np.split(x,3)[0]) + # x_loc_2 = self.space.extract_0(np.split(x,3)[1]) + # x_loc_3 = self.space.extract_0(np.split(x,3)[2]) # x_loc = list((x_loc_1, x_loc_2, x_loc_3)) x_loc_1 = self.space.extract_0(x[0]) @@ -2734,7 +2734,7 @@ def transpose_X2_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def Z20_dot(self, x): @@ -2743,12 +2743,12 @@ def Z20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -2835,7 +2835,7 @@ def Z20_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. 
res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_Z20_dot(self, x): @@ -2844,12 +2844,12 @@ def transpose_Z20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^2} Returns ---------- - res : xp.array + res : np.array dim R{N^1} Notes @@ -2918,7 +2918,7 @@ def transpose_Z20_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def Y20_dot(self, x): @@ -2927,12 +2927,12 @@ def Y20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^0} Returns ---------- - res : xp.array + res : np.array dim R^{N^3} Notes @@ -2984,12 +2984,12 @@ def transpose_Y20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^3} Returns ---------- - res : xp.array + res : np.array dim R{N^0} Notes @@ -3027,12 +3027,12 @@ def S20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R^{N^2} Returns ---------- - res : xp.array + res : np.array dim R^{N^1} Notes @@ -3119,7 +3119,7 @@ def S20_dot(self, x): # xi3 : inter(xi1)-inter(xi2)-histo(xi3)-polation. res_3 = self.space.projectors.PI_mat("13", DOF_3) - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) # ==================================================================== def transpose_S20_dot(self, x): @@ -3128,12 +3128,12 @@ def transpose_S20_dot(self, x): Parameters ---------- - x : xp.array + x : np.array dim R{N^1} Returns ---------- - res : xp.array + res : np.array dim R{N^2} Notes @@ -3202,4 +3202,4 @@ def transpose_S20_dot(self, x): res_2 = res_12 + res_22 + res_32 res_3 = res_13 + res_23 + res_33 - return xp.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) + return np.concatenate((res_1.flatten(), res_2.flatten(), res_3.flatten())) diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/mhd_operators_3d_local.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/mhd_operators_3d_local.py index 6734a11b0..bd840b3f0 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/mhd_operators_3d_local.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/mhd_operators_3d_local.py @@ -8,13 +8,13 @@ import sys -import cunumpy as xp import scipy.sparse as spa import source_run.kernels_projectors_evaluation as ker_eva import struphy.feec.basics.kernels_3d as ker_loc_3d import struphy.feec.bsplines as bsp import struphy.feec.projectors.pro_local.kernels_projectors_local_mhd as ker_loc +from struphy.utils.arrays import xp as np class projectors_local_mhd: @@ -44,87 +44,87 @@ def __init__(self, tensor_space, n_quad): self.n_quad = n_quad # number of quadrature point per integration interval # Gauss - Legendre quadrature points and weights in (-1, 1) - self.pts_loc = [xp.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] - self.wts_loc = [xp.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] + self.pts_loc = [np.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] + 
self.wts_loc = [np.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] # set interpolation and histopolation coefficients self.coeff_i = [0, 0, 0] self.coeff_h = [0, 0, 0] for a in range(3): - if self.bc[a]: - self.coeff_i[a] = xp.zeros((1, 2 * self.p[a] - 1), dtype=float) - self.coeff_h[a] = xp.zeros((1, 2 * self.p[a]), dtype=float) + if self.bc[a] == True: + self.coeff_i[a] = np.zeros((1, 2 * self.p[a] - 1), dtype=float) + self.coeff_h[a] = np.zeros((1, 2 * self.p[a]), dtype=float) if self.p[a] == 1: - self.coeff_i[a][0, :] = xp.array([1.0]) - self.coeff_h[a][0, :] = xp.array([1.0, 1.0]) + self.coeff_i[a][0, :] = np.array([1.0]) + self.coeff_h[a][0, :] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][0, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_h[a][0, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_i[a][0, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_h[a][0, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) elif self.p[a] == 3: - self.coeff_i[a][0, :] = 1 / 6 * xp.array([1.0, -8.0, 20.0, -8.0, 1.0]) - self.coeff_h[a][0, :] = 1 / 6 * xp.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) + self.coeff_i[a][0, :] = 1 / 6 * np.array([1.0, -8.0, 20.0, -8.0, 1.0]) + self.coeff_h[a][0, :] = 1 / 6 * np.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) elif self.p[a] == 4: - self.coeff_i[a][0, :] = 2 / 45 * xp.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) + self.coeff_i[a][0, :] = 2 / 45 * np.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) self.coeff_h[a][0, :] = ( - 2 / 45 * xp.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) + 2 / 45 * np.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) ) else: print("degree > 4 not implemented!") else: - self.coeff_i[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) - self.coeff_h[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) + self.coeff_i[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) + self.coeff_h[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) if self.p[a] == 1: - self.coeff_i[a][0, :] = xp.array([1.0]) - self.coeff_h[a][0, :] = xp.array([1.0, 1.0]) + self.coeff_i[a][0, :] = np.array([1.0]) + self.coeff_h[a][0, :] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][0, :] = 1 / 2 * xp.array([2.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, 2.0]) + self.coeff_i[a][0, :] = 1 / 2 * np.array([2.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 2 * np.array([0.0, 0.0, 2.0]) - self.coeff_h[a][0, :] = 1 / 2 * xp.array([3.0, -1.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) - self.coeff_h[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, -1.0, 3.0]) + self.coeff_h[a][0, :] = 1 / 2 * np.array([3.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_h[a][2, :] = 1 / 2 * np.array([0.0, 0.0, -1.0, 3.0]) elif self.p[a] == 3: - self.coeff_i[a][0, :] = 1 / 18 * xp.array([18.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 18 * xp.array([-5.0, 40.0, -24.0, 8.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 18 * xp.array([3.0, -24.0, 60.0, -24.0, 3.0]) - self.coeff_i[a][3, :] = 1 / 18 * xp.array([-1.0, 8.0, -24.0, 40.0, -5.0]) - self.coeff_i[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, 0.0, 0.0, 18.0]) - - self.coeff_h[a][0, :] = 1 / 18 * xp.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) - 
self.coeff_h[a][1, :] = 1 / 18 * xp.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 18 * xp.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) - self.coeff_h[a][3, :] = 1 / 18 * xp.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) - self.coeff_h[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) + self.coeff_i[a][0, :] = 1 / 18 * np.array([18.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 18 * np.array([-5.0, 40.0, -24.0, 8.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 18 * np.array([3.0, -24.0, 60.0, -24.0, 3.0]) + self.coeff_i[a][3, :] = 1 / 18 * np.array([-1.0, 8.0, -24.0, 40.0, -5.0]) + self.coeff_i[a][4, :] = 1 / 18 * np.array([0.0, 0.0, 0.0, 0.0, 18.0]) + + self.coeff_h[a][0, :] = 1 / 18 * np.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 18 * np.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 18 * np.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) + self.coeff_h[a][3, :] = 1 / 18 * np.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) + self.coeff_h[a][4, :] = 1 / 18 * np.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) elif self.p[a] == 4: - self.coeff_i[a][0, :] = 1 / 360 * xp.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 360 * xp.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) - self.coeff_i[a][2, :] = 1 / 360 * xp.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) - self.coeff_i[a][3, :] = 1 / 360 * xp.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) - self.coeff_i[a][4, :] = 1 / 360 * xp.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) - self.coeff_i[a][5, :] = 1 / 360 * xp.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) - self.coeff_i[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) - - self.coeff_h[a][0, :] = 1 / 360 * xp.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 360 * xp.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 360 * xp.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) + self.coeff_i[a][0, :] = 1 / 360 * np.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 360 * np.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) + self.coeff_i[a][2, :] = 1 / 360 * np.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) + self.coeff_i[a][3, :] = 1 / 360 * np.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) + self.coeff_i[a][4, :] = 1 / 360 * np.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) + self.coeff_i[a][5, :] = 1 / 360 * np.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) + self.coeff_i[a][6, :] = 1 / 360 * np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) + + self.coeff_h[a][0, :] = 1 / 360 * np.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 360 * np.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 360 * np.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) self.coeff_h[a][3, :] = ( - 1 / 360 * xp.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) + 1 / 360 * np.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) ) - self.coeff_h[a][4, :] = 1 / 360 * xp.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) - self.coeff_h[a][5, :] = 1 / 360 * xp.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) - self.coeff_h[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) + 
self.coeff_h[a][4, :] = 1 / 360 * np.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) + self.coeff_h[a][5, :] = 1 / 360 * np.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) + self.coeff_h[a][6, :] = 1 / 360 * np.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) else: print("degree > 4 not implemented!") @@ -150,31 +150,31 @@ def __init__(self, tensor_space, n_quad): ) # number of non-vanishing D bf in interpolation interval (1, 2, 4, 6) self.x_int = [ - xp.zeros((n_lambda_int, n_int), dtype=float) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) + np.zeros((n_lambda_int, n_int), dtype=float) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) ] self.int_global_N = [ - xp.zeros((n_lambda_int, n_int_locbf_N), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_N), dtype=int) for n_lambda_int, n_int_locbf_N in zip(n_lambda_int, self.n_int_locbf_N) ] self.int_global_D = [ - xp.zeros((n_lambda_int, n_int_locbf_D), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_D), dtype=int) for n_lambda_int, n_int_locbf_D in zip(n_lambda_int, self.n_int_locbf_D) ] self.int_loccof_N = [ - xp.zeros((n_lambda_int, n_int_locbf_N), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_N), dtype=int) for n_lambda_int, n_int_locbf_N in zip(n_lambda_int, self.n_int_locbf_N) ] self.int_loccof_D = [ - xp.zeros((n_lambda_int, n_int_locbf_D), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_D), dtype=int) for n_lambda_int, n_int_locbf_D in zip(n_lambda_int, self.n_int_locbf_D) ] self.x_int_indices = [ - xp.zeros((n_lambda_int, n_int), dtype=int) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) + np.zeros((n_lambda_int, n_int), dtype=int) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) ] - self.coeffi_indices = [xp.zeros(n_lambda_int, dtype=int) for n_lambda_int in n_lambda_int] + self.coeffi_indices = [np.zeros(n_lambda_int, dtype=int) for n_lambda_int in n_lambda_int] self.n_int_nvcof_D = [None, None, None] self.n_int_nvcof_N = [None, None, None] @@ -186,7 +186,7 @@ def __init__(self, tensor_space, n_quad): self.int_shift_N = [0, 0, 0] for a in range(3): - if not self.bc[a]: + if self.bc[a] == False: # maximum number of non-vanishing coefficients if self.p[a] == 1: self.n_int_nvcof_D[a] = 2 @@ -197,39 +197,39 @@ def __init__(self, tensor_space, n_quad): self.n_int_nvcof_N[a] = 3 * self.p[a] - 2 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.int_add_D[a] = xp.arange(self.n_int[a] - 2) + 1 - self.int_add_N[a] = xp.arange(self.n_int[a] - 1) + 1 + self.int_add_D[a] = np.arange(self.n_int[a] - 2) + 1 + self.int_add_N[a] = np.arange(self.n_int[a] - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) if self.p[a] == 1: - self.int_shift_D[a] = xp.arange(self.NbaseD[a]) + self.int_shift_D[a] = np.arange(self.NbaseD[a]) else: - self.int_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 2) + self.int_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 2) self.int_shift_D[a][: 2 * self.p[a] - 2] = 0 self.int_shift_D[a][-(2 * self.p[a] - 2) :] = self.int_shift_D[a][-(2 * self.p[a] - 2)] # shift local coefficients --> global coefficients (N) if self.p[a] == 1: - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) self.int_shift_N[a][-1] = self.int_shift_N[a][-2] else: - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) self.int_shift_N[a][: 2 * 
self.p[a] - 1] = 0 self.int_shift_N[a][-(2 * self.p[a] - 1) :] = self.int_shift_N[a][-(2 * self.p[a] - 1)] - counter_coeffi = xp.copy(self.p[a]) + counter_coeffi = np.copy(self.p[a]) for i in range(n_lambda_int[a]): # left boundary region if i < self.p[a] - 1: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + self.x_int_indices[a][i] = np.arange(self.n_int[a]) self.coeffi_indices[a][i] = i for j in range(2 * (self.p[a] - 1) + 1): xi = self.p[a] - 1 @@ -240,13 +240,13 @@ def __init__(self, tensor_space, n_quad): # right boundary region elif i > n_lambda_int[a] - self.p[a]: self.int_global_N[a][i] = ( - xp.arange(self.n_int_locbf_N[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_int_locbf_N[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) self.int_global_D[a][i] = ( - xp.arange(self.n_int_locbf_D[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_int_locbf_D[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + 2 * ( + self.x_int_indices[a][i] = np.arange(self.n_int[a]) + 2 * ( n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) self.coeffi_indices[a][i] = counter_coeffi @@ -260,20 +260,20 @@ def __init__(self, tensor_space, n_quad): # interior else: if self.p[a] == 1: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) + i - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + i + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + i + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) + i self.int_global_N[a][-1] = self.int_global_N[a][-2] self.int_global_D[a][-1] = self.int_global_D[a][-2] else: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1) - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1) + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1) + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1) if self.p[a] == 1: self.x_int_indices[a][i] = i else: - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1)) + self.x_int_indices[a][i] = np.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1)) self.coeffi_indices[a][i] = self.p[a] - 1 @@ -284,8 +284,8 @@ def __init__(self, tensor_space, n_quad): # local coefficient index if self.p[a] == 1: - self.int_loccof_N[a][i] = xp.array([0, 1]) - self.int_loccof_D[a][-1] = xp.array([1]) + self.int_loccof_N[a][i] = np.array([0, 1]) + self.int_loccof_D[a][-1] = np.array([1]) else: if i > 0: @@ -293,8 +293,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.int_global_D[a][i, il] bol = k_glob_new == self.int_global_D[a][i - 1] - if xp.any(bol): - self.int_loccof_D[a][i, il] = self.int_loccof_D[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.int_loccof_D[a][i, il] = self.int_loccof_D[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int[a] - self.p[a] - (self.p[a] - 2)) and ( self.int_loccof_D[a][i, il] == 0 @@ -306,8 +306,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.int_global_N[a][i, il] bol = k_glob_new == self.int_global_N[a][i - 1] - if xp.any(bol): - self.int_loccof_N[a][i, il] = self.int_loccof_N[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + 
self.int_loccof_N[a][i, il] = self.int_loccof_N[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int[a] - self.p[a] - (self.p[a] - 2)) and ( self.int_loccof_N[a][i, il] == 0 @@ -327,24 +327,24 @@ def __init__(self, tensor_space, n_quad): # shift local coefficients --> global coefficients if self.p[a] == 1: - self.int_shift_D[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a]) + self.int_shift_D[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a]) else: - self.int_shift_D[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 2) - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_D[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 2) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) for i in range(n_lambda_int[a]): # global indices of non-vanishing basis functions and position of coefficients in final matrix - self.int_global_N[a][i] = (xp.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] - self.int_global_D[a][i] = (xp.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] + self.int_global_N[a][i] = (np.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] + self.int_global_D[a][i] = (np.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] - self.int_loccof_N[a][i] = xp.arange(self.n_int_locbf_N[a] - 1, -1, -1) - self.int_loccof_D[a][i] = xp.arange(self.n_int_locbf_D[a] - 1, -1, -1) + self.int_loccof_N[a][i] = np.arange(self.n_int_locbf_N[a] - 1, -1, -1) + self.int_loccof_D[a][i] = np.arange(self.n_int_locbf_D[a] - 1, -1, -1) if self.p[a] == 1: self.x_int_indices[a][i] = i else: - self.x_int_indices[a][i] = (xp.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1))) % ( + self.x_int_indices[a][i] = (np.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1))) % ( 2 * self.Nel[a] ) @@ -356,41 +356,41 @@ def __init__(self, tensor_space, n_quad): ) % 1.0 # identify unique interpolation points to save memory - self.x_int[a] = xp.unique(self.x_int[a].flatten()) + self.x_int[a] = np.unique(self.x_int[a].flatten()) # set histopolation points, quadrature points and weights - n_lambda_his = [xp.copy(NbaseD) for NbaseD in self.NbaseD] # number of coefficients in space V1 + n_lambda_his = [np.copy(NbaseD) for NbaseD in self.NbaseD] # number of coefficients in space V1 self.n_his = [2 * p for p in self.p] # number of histopolation intervals self.n_his_locbf_N = [2 * p for p in self.p] # number of non-vanishing N bf in histopolation interval self.n_his_locbf_D = [2 * p - 1 for p in self.p] # number of non-vanishing D bf in histopolation interval self.x_his = [ - xp.zeros((n_lambda_his, n_his + 1), dtype=float) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) + np.zeros((n_lambda_his, n_his + 1), dtype=float) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) ] self.his_global_N = [ - xp.zeros((n_lambda_his, n_his_locbf_N), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_N), dtype=int) for n_lambda_his, n_his_locbf_N in zip(n_lambda_his, self.n_his_locbf_N) ] self.his_global_D = [ - xp.zeros((n_lambda_his, n_his_locbf_D), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_D), dtype=int) for n_lambda_his, n_his_locbf_D in zip(n_lambda_his, self.n_his_locbf_D) ] self.his_loccof_N = [ - xp.zeros((n_lambda_his, n_his_locbf_N), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_N), dtype=int) for n_lambda_his, n_his_locbf_N in zip(n_lambda_his, self.n_his_locbf_N) ] 
self.his_loccof_D = [ - xp.zeros((n_lambda_his, n_his_locbf_D), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_D), dtype=int) for n_lambda_his, n_his_locbf_D in zip(n_lambda_his, self.n_his_locbf_D) ] self.x_his_indices = [ - xp.zeros((n_lambda_his, n_his), dtype=int) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) + np.zeros((n_lambda_his, n_his), dtype=int) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) ] - self.coeffh_indices = [xp.zeros(n_lambda_his, dtype=int) for n_lambda_his in n_lambda_his] + self.coeffh_indices = [np.zeros(n_lambda_his, dtype=int) for n_lambda_his in n_lambda_his] self.pts = [0, 0, 0] self.wts = [0, 0, 0] @@ -405,37 +405,37 @@ def __init__(self, tensor_space, n_quad): self.his_shift_N = [0, 0, 0] for a in range(3): - if not self.bc[a]: + if self.bc[a] == False: # maximum number of non-vanishing coefficients self.n_his_nvcof_D[a] = 3 * self.p[a] - 2 self.n_his_nvcof_N[a] = 3 * self.p[a] - 1 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.his_add_D[a] = xp.arange(self.n_his[a] - 2) + 1 - self.his_add_N[a] = xp.arange(self.n_his[a] - 1) + 1 + self.his_add_D[a] = np.arange(self.n_his[a] - 2) + 1 + self.his_add_N[a] = np.arange(self.n_his[a] - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) - self.his_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 1) + self.his_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 1) self.his_shift_D[a][: 2 * self.p[a] - 1] = 0 self.his_shift_D[a][-(2 * self.p[a] - 1) :] = self.his_shift_D[a][-(2 * self.p[a] - 1)] # shift local coefficients --> global coefficients (N) - self.his_shift_N[a] = xp.arange(self.NbaseN[a]) - self.p[a] + self.his_shift_N[a] = np.arange(self.NbaseN[a]) - self.p[a] self.his_shift_N[a][: 2 * self.p[a]] = 0 self.his_shift_N[a][-2 * self.p[a] :] = self.his_shift_N[a][-2 * self.p[a]] - counter_coeffh = xp.copy(self.p[a]) + counter_coeffh = np.copy(self.p[a]) for i in range(n_lambda_his[a]): # left boundary region if i < self.p[a] - 1: - self.his_global_N[a][i] = xp.arange(self.n_his_locbf_N[a]) - self.his_global_D[a][i] = xp.arange(self.n_his_locbf_D[a]) + self.his_global_N[a][i] = np.arange(self.n_his_locbf_N[a]) + self.his_global_D[a][i] = np.arange(self.n_his_locbf_D[a]) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + self.x_his_indices[a][i] = np.arange(self.n_his[a]) self.coeffh_indices[a][i] = i for j in range(2 * self.p[a] + 1): xi = self.p[a] - 1 @@ -446,13 +446,13 @@ def __init__(self, tensor_space, n_quad): # right boundary region elif i > n_lambda_his[a] - self.p[a]: self.his_global_N[a][i] = ( - xp.arange(self.n_his_locbf_N[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_his_locbf_N[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) self.his_global_D[a][i] = ( - xp.arange(self.n_his_locbf_D[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_his_locbf_D[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + 2 * ( + self.x_his_indices[a][i] = np.arange(self.n_his[a]) + 2 * ( n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) self.coeffh_indices[a][i] = counter_coeffh @@ -465,10 +465,10 @@ def __init__(self, tensor_space, n_quad): # interior else: - self.his_global_N[a][i] = xp.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1) - self.his_global_D[a][i] = xp.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1) + self.his_global_N[a][i] = np.arange(self.n_his_locbf_N[a]) + i - 
(self.p[a] - 1) + self.his_global_D[a][i] = np.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1)) + self.x_his_indices[a][i] = np.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1)) self.coeffh_indices[a][i] = self.p[a] - 1 for j in range(2 * self.p[a] + 1): self.x_his[a][i, j] = ( @@ -481,8 +481,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.his_global_D[a][i, il] bol = k_glob_new == self.his_global_D[a][i - 1] - if xp.any(bol): - self.his_loccof_D[a][i, il] = self.his_loccof_D[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_D[a][i, il] = self.his_loccof_D[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his[a] - self.p[a] - (self.p[a] - 2)) and ( self.his_loccof_D[a][i, il] == 0 @@ -494,8 +494,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.his_global_N[a][i, il] bol = k_glob_new == self.his_global_N[a][i - 1] - if xp.any(bol): - self.his_loccof_N[a][i, il] = self.his_loccof_N[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_N[a][i, il] = self.his_loccof_N[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his[a] - self.p[a] - (self.p[a] - 2)) and ( self.his_loccof_N[a][i, il] == 0 @@ -505,9 +505,7 @@ def __init__(self, tensor_space, n_quad): # quadrature points and weights self.pts[a], self.wts[a] = bsp.quadrature_grid( - xp.unique(self.x_his[a].flatten()), - self.pts_loc[a], - self.wts_loc[a], + np.unique(self.x_his[a].flatten()), self.pts_loc[a], self.wts_loc[a] ) else: @@ -516,18 +514,18 @@ def __init__(self, tensor_space, n_quad): self.n_his_nvcof_N[a] = 2 * self.p[a] # shift local coefficients --> global coefficients (D) - self.his_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 1) + self.his_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 1) # shift local coefficients --> global coefficients (N) - self.his_shift_N[a] = xp.arange(self.NbaseD[a]) - self.p[a] + self.his_shift_N[a] = np.arange(self.NbaseD[a]) - self.p[a] for i in range(n_lambda_his[a]): - self.his_global_N[a][i] = (xp.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] - self.his_global_D[a][i] = (xp.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] - self.his_loccof_N[a][i] = xp.arange(self.n_his_locbf_N[a] - 1, -1, -1) - self.his_loccof_D[a][i] = xp.arange(self.n_his_locbf_D[a] - 1, -1, -1) + self.his_global_N[a][i] = (np.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] + self.his_global_D[a][i] = (np.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] + self.his_loccof_N[a][i] = np.arange(self.n_his_locbf_N[a] - 1, -1, -1) + self.his_loccof_D[a][i] = np.arange(self.n_his_locbf_D[a] - 1, -1, -1) - self.x_his_indices[a][i] = (xp.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1))) % ( + self.x_his_indices[a][i] = (np.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1))) % ( 2 * self.Nel[a] ) self.coeffh_indices[a][i] = 0 @@ -537,9 +535,7 @@ def __init__(self, tensor_space, n_quad): # quadrature points and weights self.pts[a], self.wts[a] = bsp.quadrature_grid( - xp.append(xp.unique(self.x_his[a].flatten() % 1.0), 1.0), - self.pts_loc[a], - self.wts_loc[a], + np.append(np.unique(self.x_his[a].flatten() % 1.0), 1.0), self.pts_loc[a], self.wts_loc[a] ) # evaluate N basis functions at interpolation and quadrature points @@ -560,9 +556,7 @@ def __init__(self, tensor_space, n_quad): self.basisD_his = [ bsp.collocation_matrix(T[1:-1], p - 1, pts.flatten(), bc, 
normalize=True).reshape( - pts[:, 0].size, - pts[0, :].size, - NbaseD, + pts[:, 0].size, pts[0, :].size, NbaseD ) for T, p, pts, bc, NbaseD in zip(self.T, self.p, self.pts, self.bc, self.NbaseD) ] @@ -592,7 +586,7 @@ def projection_Q_0form(self, domain): """ # non-vanishing coefficients - Q11 = xp.empty( + Q11 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -603,7 +597,7 @@ def projection_Q_0form(self, domain): ), dtype=float, ) - Q22 = xp.empty( + Q22 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -614,7 +608,7 @@ def projection_Q_0form(self, domain): ), dtype=float, ) - Q33 = xp.empty( + Q33 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -632,7 +626,7 @@ def projection_Q_0form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -688,7 +682,7 @@ def projection_Q_0form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -744,7 +738,7 @@ def projection_Q_0form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -800,7 +794,7 @@ def projection_Q_0form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -808,7 +802,7 @@ def projection_Q_0form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -825,7 +819,7 @@ def projection_Q_0form(self, domain): Q11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -833,7 +827,7 @@ def projection_Q_0form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -850,7 +844,7 @@ def projection_Q_0form(self, domain): Q22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -858,7 +852,7 @@ def projection_Q_0form(self, domain): self.n_his_nvcof_N[0], self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -901,7 +895,7 @@ def projection_Q_2form(self, domain): """ # non-vanishing coefficients - Q11 = xp.empty( + Q11 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -912,7 +906,7 @@ def projection_Q_2form(self, domain): ), dtype=float, ) - Q22 = xp.empty( + Q22 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -923,7 +917,7 @@ def projection_Q_2form(self, domain): ), dtype=float, ) - Q33 = xp.empty( + Q33 = np.empty( ( self.NbaseD[0], 
self.NbaseD[1], @@ -941,7 +935,7 @@ def projection_Q_2form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -997,7 +991,7 @@ def projection_Q_2form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -1053,7 +1047,7 @@ def projection_Q_2form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -1109,7 +1103,7 @@ def projection_Q_2form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -1117,7 +1111,7 @@ def projection_Q_2form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_D[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -1134,7 +1128,7 @@ def projection_Q_2form(self, domain): Q11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -1142,7 +1136,7 @@ def projection_Q_2form(self, domain): self.n_his_nvcof_D[0], self.n_int_nvcof_N[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -1159,7 +1153,7 @@ def projection_Q_2form(self, domain): Q22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -1167,7 +1161,7 @@ def projection_Q_2form(self, domain): self.n_his_nvcof_D[0], self.n_his_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1210,7 +1204,7 @@ def projection_W_0form(self, domain): """ # non-vanishing coefficients - W1 = xp.empty( + W1 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1221,14 +1215,14 @@ def projection_W_0form(self, domain): ), dtype=float, ) - # W2 = xp.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2], self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2]), dtype=float) - # W3 = xp.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2], self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2]), dtype=float) + # W2 = np.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2], self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2]), dtype=float) + # W3 = np.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2], self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2]), dtype=float) # size of interpolation/quadrature points of the 3 components n_unique = [self.x_int[0].size, self.x_int[1].size, self.x_int[2].size] # assembly - mat_eq = xp.empty((n_unique[0], n_unique[1], n_unique[2]), dtype=float) 
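# -------------------------------------------------------------------------
# A minimal sketch of the dense-to-sparse pattern that every projection_*
# routine touched by this patch repeats: fill a 6D array of non-vanishing
# coefficients, enumerate it with np.indices, flatten the multi-indices to
# (row, col) pairs, and hand the result to scipy.sparse (imported as spa in
# this file). All shapes, the shift values, and the name `coeffs` below are
# illustrative assumptions, not struphy API; csr is one possible format.
#
#   import numpy as np            # stand-in for: from struphy.utils.arrays import xp as np
#   import scipy.sparse as spa
#
#   n1, n2, n3 = 4, 3, 2          # basis functions per direction (assumed)
#   b1, b2, b3 = 3, 3, 3          # non-vanishing coefficients per direction (assumed)
#
#   shift1 = np.arange(n1) - 1    # local -> global column shifts (assumed values)
#   shift2 = np.arange(n2) - 1
#   shift3 = np.arange(n3) - 1
#
#   coeffs = np.random.rand(n1, n2, n3, b1, b2, b3)
#
#   indices = np.indices((n1, n2, n3, b1, b2, b3))
#
#   # flatten the (i1, i2, i3) multi-index of each target coefficient to a row
#   row = (n2 * n3 * indices[0] + n3 * indices[1] + indices[2]).flatten()
#
#   # shift the local coefficient index to a global one (modulo = periodic wrap)
#   col1 = (indices[3] + shift1[indices[0]]) % n1
#   col2 = (indices[4] + shift2[indices[1]]) % n2
#   col3 = (indices[5] + shift3[indices[2]]) % n3
#   col = (n2 * n3 * col1 + n3 * col2 + col3).flatten()
#
#   W = spa.csr_matrix((coeffs.flatten(), (row, col)), shape=(n1 * n2 * n3,) * 2)
#   W.eliminate_zeros()           # same clean-up step as after each conversion here
# -------------------------------------------------------------------------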
+ mat_eq = np.empty((n_unique[0], n_unique[1], n_unique[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -1290,7 +1284,7 @@ def projection_W_0form(self, domain): """ # conversion to sparse matrix - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1298,7 +1292,7 @@ def projection_W_0form(self, domain): self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) # row indices @@ -1351,7 +1345,7 @@ def projection_T_0form(self, domain): """ # non-vanishing coefficients - T12 = xp.empty( + T12 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1362,7 +1356,7 @@ def projection_T_0form(self, domain): ), dtype=float, ) - T13 = xp.empty( + T13 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1374,7 +1368,7 @@ def projection_T_0form(self, domain): dtype=float, ) - T21 = xp.empty( + T21 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1385,7 +1379,7 @@ def projection_T_0form(self, domain): ), dtype=float, ) - T23 = xp.empty( + T23 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1397,7 +1391,7 @@ def projection_T_0form(self, domain): dtype=float, ) - T31 = xp.empty( + T31 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1408,7 +1402,7 @@ def projection_T_0form(self, domain): ), dtype=float, ) - T32 = xp.empty( + T32 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1426,7 +1420,7 @@ def projection_T_0form(self, domain): n_unique3 = [self.x_int[0].size, self.x_int[1].size, self.pts[2].flatten().size] # ================= assembly of 1 - component (pi1_1 : his, int, int) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -1521,7 +1515,7 @@ def projection_T_0form(self, domain): ) # ================= assembly of 2 - component (PI_1_2 : int, his, int) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -1627,7 +1621,7 @@ def projection_T_0form(self, domain): ) # ================= assembly of 3 - component (PI_1_3 : int, int, his) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -1733,7 +1727,7 @@ def projection_T_0form(self, domain): ) # conversion to sparse matrices (1 - component) - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1741,7 +1735,7 @@ def projection_T_0form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1757,7 +1751,7 @@ def projection_T_0form(self, domain): ) T12.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1765,7 +1759,7 @@ def projection_T_0form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1782,7 +1776,7 @@ def projection_T_0form(self, domain): T13.eliminate_zeros() # conversion to sparse matrices (2 - component) - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1790,7 +1784,7 @@ def projection_T_0form(self, domain): self.n_int_nvcof_N[0], 
self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1806,7 +1800,7 @@ def projection_T_0form(self, domain): ) T21.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1814,7 +1808,7 @@ def projection_T_0form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1831,7 +1825,7 @@ def projection_T_0form(self, domain): T23.eliminate_zeros() # conversion to sparse matrices (3 - component) - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1839,7 +1833,7 @@ def projection_T_0form(self, domain): self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1855,7 +1849,7 @@ def projection_T_0form(self, domain): ) T31.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -1863,7 +1857,7 @@ def projection_T_0form(self, domain): self.n_int_nvcof_N[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -1906,7 +1900,7 @@ def projection_T_1form(self, domain): """ # non-vanishing coefficients - T12 = xp.empty( + T12 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -1917,7 +1911,7 @@ def projection_T_1form(self, domain): ), dtype=float, ) - T13 = xp.empty( + T13 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1929,7 +1923,7 @@ def projection_T_1form(self, domain): dtype=float, ) - T21 = xp.empty( + T21 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -1940,7 +1934,7 @@ def projection_T_1form(self, domain): ), dtype=float, ) - T23 = xp.empty( + T23 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -1952,7 +1946,7 @@ def projection_T_1form(self, domain): dtype=float, ) - T31 = xp.empty( + T31 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -1963,7 +1957,7 @@ def projection_T_1form(self, domain): ), dtype=float, ) - T32 = xp.empty( + T32 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -1981,7 +1975,7 @@ def projection_T_1form(self, domain): n_unique3 = [self.x_int[0].size, self.x_int[1].size, self.pts[2].flatten().size] # ================= assembly of 1 - component (pi1_1 : his, int, int) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -2076,7 +2070,7 @@ def projection_T_1form(self, domain): ) # ================= assembly of 2 - component (PI_1_2 : int, his, int) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -2171,7 +2165,7 @@ def projection_T_1form(self, domain): ) # ================= assembly of 3 - component (PI_1_3 : int, int, his) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -2266,7 +2260,7 @@ def projection_T_1form(self, domain): ) # conversion to sparse matrices (1 - component) - indices = xp.indices( + indices = np.indices( ( 
self.NbaseN[0], self.NbaseD[1], @@ -2274,7 +2268,7 @@ def projection_T_1form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2290,7 +2284,7 @@ def projection_T_1form(self, domain): ) T12.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -2298,7 +2292,7 @@ def projection_T_1form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_int_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2315,7 +2309,7 @@ def projection_T_1form(self, domain): T13.eliminate_zeros() # conversion to sparse matrices (2 - component) - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -2323,7 +2317,7 @@ def projection_T_1form(self, domain): self.n_int_nvcof_D[0], self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2339,7 +2333,7 @@ def projection_T_1form(self, domain): ) T21.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -2347,7 +2341,7 @@ def projection_T_1form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_N[1], self.n_int_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2364,7 +2358,7 @@ def projection_T_1form(self, domain): T23.eliminate_zeros() # conversion to sparse matrices (3 - component) - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -2372,7 +2366,7 @@ def projection_T_1form(self, domain): self.n_int_nvcof_D[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2388,7 +2382,7 @@ def projection_T_1form(self, domain): ) T31.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -2396,7 +2390,7 @@ def projection_T_1form(self, domain): self.n_int_nvcof_N[0], self.n_int_nvcof_D[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2439,7 +2433,7 @@ def projection_T_2form(self, domain): """ # non-vanishing coefficients - T12 = xp.empty( + T12 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -2450,7 +2444,7 @@ def projection_T_2form(self, domain): ), dtype=float, ) - T13 = xp.empty( + T13 = np.empty( ( self.NbaseD[0], self.NbaseD[1], @@ -2462,7 +2456,7 @@ def projection_T_2form(self, domain): dtype=float, ) - T21 = xp.empty( + T21 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -2473,7 +2467,7 @@ def projection_T_2form(self, domain): ), dtype=float, ) - T23 = xp.empty( + T23 = np.empty( ( self.NbaseD[0], self.NbaseD[1], @@ -2485,7 +2479,7 @@ def projection_T_2form(self, domain): dtype=float, ) - T31 = xp.empty( + T31 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -2496,7 +2490,7 @@ def projection_T_2form(self, domain): ), dtype=float, ) - T32 = xp.empty( + T32 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -2514,7 +2508,7 @@ def projection_T_2form(self, domain): n_unique3 = [self.x_int[0].size, self.x_int[1].size, self.pts[2].flatten().size] # ================= assembly of 1 - component (pi1_1 : his, int, int) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), 
dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -2609,7 +2603,7 @@ def projection_T_2form(self, domain): ) # ================= assembly of 2 - component (PI_1_2 : int, his, int) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -2704,7 +2698,7 @@ def projection_T_2form(self, domain): ) # ================= assembly of 3 - component (PI_1_3 : int, int, his) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -2799,7 +2793,7 @@ def projection_T_2form(self, domain): ) # ============== conversion to sparse matrices (1 - component) ============== - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -2807,7 +2801,7 @@ def projection_T_2form(self, domain): self.n_his_nvcof_D[0], self.n_int_nvcof_N[1], self.n_int_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2823,7 +2817,7 @@ def projection_T_2form(self, domain): ) T12.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -2831,7 +2825,7 @@ def projection_T_2form(self, domain): self.n_his_nvcof_D[0], self.n_int_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2848,7 +2842,7 @@ def projection_T_2form(self, domain): T13.eliminate_zeros() # ============== conversion to sparse matrices (2 - component) ============== - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -2856,7 +2850,7 @@ def projection_T_2form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_D[1], self.n_int_nvcof_D[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2872,7 +2866,7 @@ def projection_T_2form(self, domain): ) T21.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -2880,7 +2874,7 @@ def projection_T_2form(self, domain): self.n_int_nvcof_D[0], self.n_his_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -2897,7 +2891,7 @@ def projection_T_2form(self, domain): T23.eliminate_zeros() # ============== conversion to sparse matrices (3 - component) ============== - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -2905,7 +2899,7 @@ def projection_T_2form(self, domain): self.n_int_nvcof_N[0], self.n_int_nvcof_D[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2921,7 +2915,7 @@ def projection_T_2form(self, domain): ) T31.eliminate_zeros() - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -2929,7 +2923,7 @@ def projection_T_2form(self, domain): self.n_int_nvcof_D[0], self.n_int_nvcof_N[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -2972,7 +2966,7 @@ def projection_S_0form(self, domain): """ # non-vanishing coefficients - S11 = xp.empty( + S11 = np.empty( ( 
self.NbaseN[0], self.NbaseN[1], @@ -2983,7 +2977,7 @@ def projection_S_0form(self, domain): ), dtype=float, ) - S22 = xp.empty( + S22 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -2994,7 +2988,7 @@ def projection_S_0form(self, domain): ), dtype=float, ) - S33 = xp.empty( + S33 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -3012,7 +3006,7 @@ def projection_S_0form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -3068,7 +3062,7 @@ def projection_S_0form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3124,7 +3118,7 @@ def projection_S_0form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3180,7 +3174,7 @@ def projection_S_0form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3188,7 +3182,7 @@ def projection_S_0form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3205,7 +3199,7 @@ def projection_S_0form(self, domain): S11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3213,7 +3207,7 @@ def projection_S_0form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3230,7 +3224,7 @@ def projection_S_0form(self, domain): S22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3238,7 +3232,7 @@ def projection_S_0form(self, domain): self.n_his_nvcof_N[0], self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3281,7 +3275,7 @@ def projection_S_2form(self, domain): """ # non-vanishing coefficients - S11 = xp.empty( + S11 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -3292,7 +3286,7 @@ def projection_S_2form(self, domain): ), dtype=float, ) - S22 = xp.empty( + S22 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -3303,7 +3297,7 @@ def projection_S_2form(self, domain): ), dtype=float, ) - S33 = xp.empty( + S33 = np.empty( ( self.NbaseD[0], self.NbaseD[1], @@ -3321,7 +3315,7 @@ def projection_S_2form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], 
n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -3377,7 +3371,7 @@ def projection_S_2form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3433,7 +3427,7 @@ def projection_S_2form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3489,7 +3483,7 @@ def projection_S_2form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -3497,7 +3491,7 @@ def projection_S_2form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_D[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -3514,7 +3508,7 @@ def projection_S_2form(self, domain): S11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -3522,7 +3516,7 @@ def projection_S_2form(self, domain): self.n_his_nvcof_D[0], self.n_int_nvcof_N[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -3539,7 +3533,7 @@ def projection_S_2form(self, domain): S22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -3547,7 +3541,7 @@ def projection_S_2form(self, domain): self.n_his_nvcof_D[0], self.n_his_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3588,7 +3582,7 @@ def projection_K_3form(self, domain): """ # non-vanishing coefficients - K = xp.zeros( + K = np.zeros( ( self.NbaseD[0], self.NbaseD[1], @@ -3603,7 +3597,7 @@ def projection_K_3form(self, domain): # evaluation of equilibrium pressure at interpolation points n_unique = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.pts[2].flatten().size] - mat_eq = xp.zeros((n_unique[0], n_unique[1], n_unique[2]), dtype=float) + mat_eq = np.zeros((n_unique[0], n_unique[1], n_unique[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3662,7 +3656,7 @@ def projection_K_3form(self, domain): ) # conversion to sparse matrix - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -3670,7 +3664,7 @@ def projection_K_3form(self, domain): self.n_his_nvcof_D[0], self.n_his_nvcof_D[1], self.n_his_nvcof_D[2], - ), + ) ) # row indices @@ -3717,7 +3711,7 @@ def projection_N_0form(self, domain): """ # non-vanishing coefficients - N11 = xp.empty( + N11 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -3728,7 +3722,7 @@ def projection_N_0form(self, domain): ), dtype=float, ) - N22 = xp.empty( + N22 = np.empty( ( self.NbaseN[0], self.NbaseN[1], @@ -3739,7 +3733,7 @@ def projection_N_0form(self, domain): ), dtype=float, ) - N33 = xp.empty( + N33 = np.empty( ( self.NbaseN[0], 
self.NbaseN[1], @@ -3757,7 +3751,7 @@ def projection_N_0form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -3813,7 +3807,7 @@ def projection_N_0form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3869,7 +3863,7 @@ def projection_N_0form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -3925,7 +3919,7 @@ def projection_N_0form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3933,7 +3927,7 @@ def projection_N_0form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3950,7 +3944,7 @@ def projection_N_0form(self, domain): N11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3958,7 +3952,7 @@ def projection_N_0form(self, domain): self.n_his_nvcof_N[0], self.n_int_nvcof_N[1], self.n_his_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -3975,7 +3969,7 @@ def projection_N_0form(self, domain): N22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseN[1], @@ -3983,7 +3977,7 @@ def projection_N_0form(self, domain): self.n_his_nvcof_N[0], self.n_his_nvcof_N[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -4026,7 +4020,7 @@ def projection_N_2form(self, domain): """ # non-vanishing coefficients - N11 = xp.empty( + N11 = np.empty( ( self.NbaseN[0], self.NbaseD[1], @@ -4037,7 +4031,7 @@ def projection_N_2form(self, domain): ), dtype=float, ) - N22 = xp.empty( + N22 = np.empty( ( self.NbaseD[0], self.NbaseN[1], @@ -4048,7 +4042,7 @@ def projection_N_2form(self, domain): ), dtype=float, ) - N33 = xp.empty( + N33 = np.empty( ( self.NbaseD[0], self.NbaseD[1], @@ -4066,7 +4060,7 @@ def projection_N_2form(self, domain): n_unique3 = [self.pts[0].flatten().size, self.pts[1].flatten().size, self.x_int[2].size] # ========= assembly of 1 - component (pi2_1 : int, his, his) ============ - mat_eq = xp.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) + mat_eq = np.empty((n_unique1[0], n_unique1[1], n_unique1[2]), dtype=float) ker_eva.kernel_eva( self.x_int[0], @@ -4114,7 +4108,7 @@ def projection_N_2form(self, domain): ) # ========= assembly of 2 - component (pi2_2 : his, int, his) ============ - mat_eq = xp.empty((n_unique2[0], 
n_unique2[1], n_unique2[2]), dtype=float) + mat_eq = np.empty((n_unique2[0], n_unique2[1], n_unique2[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -4162,7 +4156,7 @@ def projection_N_2form(self, domain): ) # ========= assembly of 3 - component (pi2_3 : his, his, int) ============ - mat_eq = xp.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) + mat_eq = np.empty((n_unique3[0], n_unique3[1], n_unique3[2]), dtype=float) ker_eva.kernel_eva( self.pts[0].flatten(), @@ -4210,7 +4204,7 @@ def projection_N_2form(self, domain): ) # ========= conversion to sparse matrices (1 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseN[0], self.NbaseD[1], @@ -4218,7 +4212,7 @@ def projection_N_2form(self, domain): self.n_int_nvcof_N[0], self.n_his_nvcof_D[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -4235,7 +4229,7 @@ def projection_N_2form(self, domain): N11.eliminate_zeros() # ========= conversion to sparse matrices (2 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseN[1], @@ -4243,7 +4237,7 @@ def projection_N_2form(self, domain): self.n_his_nvcof_D[0], self.n_int_nvcof_N[1], self.n_his_nvcof_D[2], - ), + ) ) row = self.NbaseN[1] * self.NbaseD[2] * indices[0] + self.NbaseD[2] * indices[1] + indices[2] @@ -4260,7 +4254,7 @@ def projection_N_2form(self, domain): N22.eliminate_zeros() # ========= conversion to sparse matrices (3 - component) ================= - indices = xp.indices( + indices = np.indices( ( self.NbaseD[0], self.NbaseD[1], @@ -4268,7 +4262,7 @@ def projection_N_2form(self, domain): self.n_his_nvcof_D[0], self.n_his_nvcof_D[1], self.n_int_nvcof_N[2], - ), + ) ) row = self.NbaseD[1] * self.NbaseN[2] * indices[0] + self.NbaseN[2] * indices[1] + indices[2] @@ -4329,15 +4323,7 @@ class term_curl_beq: """ def __init__( - self, - tensor_space, - mapping, - kind_map=None, - params_map=None, - tensor_space_F=None, - cx=None, - cy=None, - cz=None, + self, tensor_space, mapping, kind_map=None, params_map=None, tensor_space_F=None, cx=None, cy=None, cz=None ): self.p = tensor_space.p # spline degrees self.Nel = tensor_space.Nel # number of elements @@ -4370,17 +4356,14 @@ def __init__( self.cz = cz # ============= evaluation of background magnetic field at quadrature points ========= - self.mat_curl_beq_1 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.mat_curl_beq_1 = np.empty( + (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) - self.mat_curl_beq_2 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.mat_curl_beq_2 = np.empty( + (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) - self.mat_curl_beq_3 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.mat_curl_beq_3 = np.empty( + (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) if mapping == 0: @@ -4471,23 +4454,20 @@ def __init__( ) # ====================== perturbed magnetic field at quadrature points ========== - self.B1 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.B1 = np.empty( 
+ (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) - self.B2 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.B2 = np.empty( + (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) - self.B3 = xp.empty( - (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), - dtype=float, + self.B3 = np.empty( + (self.Nel[0], self.Nel[1], self.Nel[2], self.n_quad[0], self.n_quad[1], self.n_quad[2]), dtype=float ) # ========================== inner products ===================================== - self.F1 = xp.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) - self.F2 = xp.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) - self.F3 = xp.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) + self.F1 = np.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) + self.F2 = np.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) + self.F3 = np.empty((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) # ============================================================ def inner_curl_beq(self, b1, b2, b3): @@ -4618,7 +4598,7 @@ def inner_curl_beq(self, b1, b2, b3): # ker_loc_3d.kernel_inner_2(self.Nel[0], self.Nel[1], self.Nel[2], self.p[0], self.p[1], self.p[2], self.n_quad[0], self.n_quad[1], self.n_quad[2], 0, 0, 0, self.wts[0], self.wts[1], self.wts[2], self.basisN[0], self.basisN[1], self.basisN[2], self.NbaseN[0], self.NbaseN[1], self.NbaseN[2], self.F3, self.mat_curl_beq_3) # convert to 1d array and return - return xp.concatenate((self.F1.flatten(), self.F2.flatten(), self.F3.flatten())) + return np.concatenate((self.F1.flatten(), self.F2.flatten(), self.F3.flatten())) # ================ mass matrix in V1 =========================== @@ -4661,9 +4641,9 @@ def mass_curl(tensor_space, kind_map, params_map): Nbj3 = [NbaseD[2], NbaseN[2], NbaseD[2], NbaseN[2], NbaseD[2], NbaseD[2]] # ============= evaluation of background magnetic field at quadrature points ========= - mat_curl_beq_1 = xp.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) - mat_curl_beq_2 = xp.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) - mat_curl_beq_3 = xp.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) + mat_curl_beq_1 = np.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) + mat_curl_beq_2 = np.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) + mat_curl_beq_3 = np.empty((Nel[0], Nel[1], Nel[2], n_quad[0], n_quad[1], n_quad[2]), dtype=float) ker_eva.kernel_eva_quad(Nel, n_quad, pts[0], pts[1], pts[2], mat_curl_beq_1, 61, kind_map, params_map) ker_eva.kernel_eva_quad(Nel, n_quad, pts[0], pts[1], pts[2], mat_curl_beq_2, 62, kind_map, params_map) @@ -4672,7 +4652,7 @@ def mass_curl(tensor_space, kind_map, params_map): # blocks of global mass matrix M = [ - xp.zeros((Nbi1, Nbi2, Nbi3, 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) + np.zeros((Nbi1, Nbi2, Nbi3, 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) for Nbi1, Nbi2, Nbi3 in zip(Nbi1, Nbi2, Nbi3) ] @@ -4878,11 +4858,11 @@ def mass_curl(tensor_space, kind_map, params_map): counter = 0 for i in range(6): - indices = xp.indices((Nbi1[counter], Nbi2[counter], Nbi3[counter], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) + indices = 
np.indices((Nbi1[counter], Nbi2[counter], Nbi3[counter], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) - shift1 = xp.arange(Nbi1[counter]) - p[0] - shift2 = xp.arange(Nbi2[counter]) - p[1] - shift3 = xp.arange(Nbi3[counter]) - p[2] + shift1 = np.arange(Nbi1[counter]) - p[0] + shift2 = np.arange(Nbi2[counter]) - p[1] + shift3 = np.arange(Nbi3[counter]) - p[2] row = (Nbi2[counter] * Nbi3[counter] * indices[0] + Nbi3[counter] * indices[1] + indices[2]).flatten() diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/projectors_local.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/projectors_local.py index 9ede3f608..72c5babfd 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/projectors_local.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/pro_local/projectors_local.py @@ -6,11 +6,11 @@ Classes for local projectors in 1D and 3D based on quasi-spline interpolation and histopolation. """ -import cunumpy as xp import scipy.sparse as spa import struphy.feec.bsplines as bsp import struphy.feec.projectors.pro_local.kernels_projectors_local as ker_loc +from struphy.utils.arrays import xp as np # ======================= 1d ==================================== @@ -41,85 +41,85 @@ def __init__(self, spline_space, n_quad): self.n_quad = n_quad # number of quadrature point per integration interval # Gauss - Legendre quadrature points and weights in (-1, 1) - self.pts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[0] - self.wts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[1] + self.pts_loc = np.polynomial.legendre.leggauss(self.n_quad)[0] + self.wts_loc = np.polynomial.legendre.leggauss(self.n_quad)[1] # set interpolation and histopolation coefficients - if self.bc: - self.coeff_i = xp.zeros((1, 2 * self.p - 1), dtype=float) - self.coeff_h = xp.zeros((1, 2 * self.p), dtype=float) + if self.bc == True: + self.coeff_i = np.zeros((1, 2 * self.p - 1), dtype=float) + self.coeff_h = np.zeros((1, 2 * self.p), dtype=float) if self.p == 1: - self.coeff_i[0, :] = xp.array([1.0]) - self.coeff_h[0, :] = xp.array([1.0, 1.0]) + self.coeff_i[0, :] = np.array([1.0]) + self.coeff_h[0, :] = np.array([1.0, 1.0]) elif self.p == 2: - self.coeff_i[0, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_h[0, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_i[0, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_h[0, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) elif self.p == 3: - self.coeff_i[0, :] = 1 / 6 * xp.array([1.0, -8.0, 20.0, -8.0, 1.0]) - self.coeff_h[0, :] = 1 / 6 * xp.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) + self.coeff_i[0, :] = 1 / 6 * np.array([1.0, -8.0, 20.0, -8.0, 1.0]) + self.coeff_h[0, :] = 1 / 6 * np.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) elif self.p == 4: - self.coeff_i[0, :] = 2 / 45 * xp.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) - self.coeff_h[0, :] = 2 / 45 * xp.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) + self.coeff_i[0, :] = 2 / 45 * np.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) + self.coeff_h[0, :] = 2 / 45 * np.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) else: print("degree > 4 not implemented!") else: - self.coeff_i = xp.zeros((2 * self.p - 1, 2 * self.p - 1), dtype=float) - self.coeff_h = xp.zeros((2 * self.p - 1, 2 * self.p), dtype=float) + self.coeff_i = np.zeros((2 * self.p - 1, 2 * self.p - 1), dtype=float) + self.coeff_h = np.zeros((2 * self.p - 1, 2 * self.p), dtype=float) if self.p == 1: - 
self.coeff_i[0, :] = xp.array([1.0]) - self.coeff_h[0, :] = xp.array([1.0, 1.0]) + self.coeff_i[0, :] = np.array([1.0]) + self.coeff_h[0, :] = np.array([1.0, 1.0]) elif self.p == 2: - self.coeff_i[0, :] = 1 / 2 * xp.array([2.0, 0.0, 0.0]) - self.coeff_i[1, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_i[2, :] = 1 / 2 * xp.array([0.0, 0.0, 2.0]) + self.coeff_i[0, :] = 1 / 2 * np.array([2.0, 0.0, 0.0]) + self.coeff_i[1, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_i[2, :] = 1 / 2 * np.array([0.0, 0.0, 2.0]) - self.coeff_h[0, :] = 1 / 2 * xp.array([3.0, -1.0, 0.0, 0.0]) - self.coeff_h[1, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) - self.coeff_h[2, :] = 1 / 2 * xp.array([0.0, 0.0, -1.0, 3.0]) + self.coeff_h[0, :] = 1 / 2 * np.array([3.0, -1.0, 0.0, 0.0]) + self.coeff_h[1, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_h[2, :] = 1 / 2 * np.array([0.0, 0.0, -1.0, 3.0]) elif self.p == 3: - self.coeff_i[0, :] = 1 / 18 * xp.array([18.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[1, :] = 1 / 18 * xp.array([-5.0, 40.0, -24.0, 8.0, -1.0]) - self.coeff_i[2, :] = 1 / 18 * xp.array([3.0, -24.0, 60.0, -24.0, 3.0]) - self.coeff_i[3, :] = 1 / 18 * xp.array([-1.0, 8.0, -24.0, 40.0, -5.0]) - self.coeff_i[4, :] = 1 / 18 * xp.array([0.0, 0.0, 0.0, 0.0, 18.0]) - - self.coeff_h[0, :] = 1 / 18 * xp.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) - self.coeff_h[1, :] = 1 / 18 * xp.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) - self.coeff_h[2, :] = 1 / 18 * xp.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) - self.coeff_h[3, :] = 1 / 18 * xp.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) - self.coeff_h[4, :] = 1 / 18 * xp.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) + self.coeff_i[0, :] = 1 / 18 * np.array([18.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[1, :] = 1 / 18 * np.array([-5.0, 40.0, -24.0, 8.0, -1.0]) + self.coeff_i[2, :] = 1 / 18 * np.array([3.0, -24.0, 60.0, -24.0, 3.0]) + self.coeff_i[3, :] = 1 / 18 * np.array([-1.0, 8.0, -24.0, 40.0, -5.0]) + self.coeff_i[4, :] = 1 / 18 * np.array([0.0, 0.0, 0.0, 0.0, 18.0]) + + self.coeff_h[0, :] = 1 / 18 * np.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) + self.coeff_h[1, :] = 1 / 18 * np.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) + self.coeff_h[2, :] = 1 / 18 * np.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) + self.coeff_h[3, :] = 1 / 18 * np.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) + self.coeff_h[4, :] = 1 / 18 * np.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) elif self.p == 4: - self.coeff_i[0, :] = 1 / 360 * xp.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[1, :] = 1 / 360 * xp.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) - self.coeff_i[2, :] = 1 / 360 * xp.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) - self.coeff_i[3, :] = 1 / 360 * xp.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) - self.coeff_i[4, :] = 1 / 360 * xp.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) - self.coeff_i[5, :] = 1 / 360 * xp.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) - self.coeff_i[6, :] = 1 / 360 * xp.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) - - self.coeff_h[0, :] = 1 / 360 * xp.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) - self.coeff_h[1, :] = 1 / 360 * xp.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) - self.coeff_h[2, :] = 1 / 360 * xp.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) - self.coeff_h[3, :] = 1 / 360 * xp.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) - self.coeff_h[4, :] = 1 / 360 * xp.array([0.0, 0.0, -24.0, 360.0, -1425.0, 
2175.0, -585.0, 39.0]) - self.coeff_h[5, :] = 1 / 360 * xp.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) - self.coeff_h[6, :] = 1 / 360 * xp.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) + self.coeff_i[0, :] = 1 / 360 * np.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[1, :] = 1 / 360 * np.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) + self.coeff_i[2, :] = 1 / 360 * np.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) + self.coeff_i[3, :] = 1 / 360 * np.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) + self.coeff_i[4, :] = 1 / 360 * np.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) + self.coeff_i[5, :] = 1 / 360 * np.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) + self.coeff_i[6, :] = 1 / 360 * np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) + + self.coeff_h[0, :] = 1 / 360 * np.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) + self.coeff_h[1, :] = 1 / 360 * np.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) + self.coeff_h[2, :] = 1 / 360 * np.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) + self.coeff_h[3, :] = 1 / 360 * np.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) + self.coeff_h[4, :] = 1 / 360 * np.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) + self.coeff_h[5, :] = 1 / 360 * np.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) + self.coeff_h[6, :] = 1 / 360 * np.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) else: print("degree > 4 not implemented!") # set interpolation points - n_lambda_int = xp.copy(self.NbaseN) # number of coefficients in space V0 + n_lambda_int = np.copy(self.NbaseN) # number of coefficients in space V0 self.n_int = 2 * self.p - 1 # number of local interpolation points (1, 3, 5, 7, ...) if self.p == 1: @@ -134,25 +134,23 @@ def __init__(self, spline_space, n_quad): 2 * self.p - 2 ) # number of non-vanishing D bf in interpolation interval (1, 2, 4, 6, ...) - self.x_int = xp.zeros((n_lambda_int, self.n_int), dtype=float) # interpolation points for each coeff. + self.x_int = np.zeros((n_lambda_int, self.n_int), dtype=float) # interpolation points for each coeff. - self.int_global_N = xp.zeros( - (n_lambda_int, self.n_int_locbf_N), - dtype=int, + self.int_global_N = np.zeros( + (n_lambda_int, self.n_int_locbf_N), dtype=int ) # global indices of non-vanishing N bf - self.int_global_D = xp.zeros( - (n_lambda_int, self.n_int_locbf_D), - dtype=int, + self.int_global_D = np.zeros( + (n_lambda_int, self.n_int_locbf_D), dtype=int ) # global indices of non-vanishing D bf - self.int_loccof_N = xp.zeros((n_lambda_int, self.n_int_locbf_N), dtype=int) # index of non-vanishing coeff. (N) - self.int_loccof_D = xp.zeros((n_lambda_int, self.n_int_locbf_D), dtype=int) # index of non-vanishing coeff. (D) + self.int_loccof_N = np.zeros((n_lambda_int, self.n_int_locbf_N), dtype=int) # index of non-vanishing coeff. (N) + self.int_loccof_D = np.zeros((n_lambda_int, self.n_int_locbf_D), dtype=int) # index of non-vanishing coeff. 
(D) - self.x_int_indices = xp.zeros((n_lambda_int, self.n_int), dtype=int) + self.x_int_indices = np.zeros((n_lambda_int, self.n_int), dtype=int) - self.coeffi_indices = xp.zeros(n_lambda_int, dtype=int) + self.coeffi_indices = np.zeros(n_lambda_int, dtype=int) - if not self.bc: + if self.bc == False: # maximum number of non-vanishing coefficients if self.p == 1: self.n_int_nvcof_D = 2 @@ -162,39 +160,39 @@ def __init__(self, spline_space, n_quad): self.n_int_nvcof_N = 3 * self.p - 2 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.int_add_D = xp.arange(self.n_int - 2) + 1 - self.int_add_N = xp.arange(self.n_int - 1) + 1 + self.int_add_D = np.arange(self.n_int - 2) + 1 + self.int_add_N = np.arange(self.n_int - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) if self.p == 1: - self.int_shift_D = xp.arange(self.NbaseD) + self.int_shift_D = np.arange(self.NbaseD) else: - self.int_shift_D = xp.arange(self.NbaseD) - (self.p - 2) + self.int_shift_D = np.arange(self.NbaseD) - (self.p - 2) self.int_shift_D[: 2 * self.p - 2] = 0 self.int_shift_D[-(2 * self.p - 2) :] = self.int_shift_D[-(2 * self.p - 2)] # shift local coefficients --> global coefficients (N) if self.p == 1: - self.int_shift_N = xp.arange(self.NbaseN) + self.int_shift_N = np.arange(self.NbaseN) self.int_shift_N[-1] = self.int_shift_N[-2] else: - self.int_shift_N = xp.arange(self.NbaseN) - (self.p - 1) + self.int_shift_N = np.arange(self.NbaseN) - (self.p - 1) self.int_shift_N[: 2 * self.p - 1] = 0 self.int_shift_N[-(2 * self.p - 1) :] = self.int_shift_N[-(2 * self.p - 1)] - counter_coeffi = xp.copy(self.p) + counter_coeffi = np.copy(self.p) for i in range(n_lambda_int): # left boundary region if i < self.p - 1: - self.int_global_N[i] = xp.arange(self.n_int_locbf_N) - self.int_global_D[i] = xp.arange(self.n_int_locbf_D) + self.int_global_N[i] = np.arange(self.n_int_locbf_N) + self.int_global_D[i] = np.arange(self.n_int_locbf_D) - self.x_int_indices[i] = xp.arange(self.n_int) + self.x_int_indices[i] = np.arange(self.n_int) self.coeffi_indices[i] = i for j in range(2 * (self.p - 1) + 1): xi = self.p - 1 @@ -202,10 +200,10 @@ def __init__(self, spline_space, n_quad): # right boundary region elif i > n_lambda_int - self.p: - self.int_global_N[i] = xp.arange(self.n_int_locbf_N) + n_lambda_int - self.p - (self.p - 1) - self.int_global_D[i] = xp.arange(self.n_int_locbf_D) + n_lambda_int - self.p - (self.p - 1) + self.int_global_N[i] = np.arange(self.n_int_locbf_N) + n_lambda_int - self.p - (self.p - 1) + self.int_global_D[i] = np.arange(self.n_int_locbf_D) + n_lambda_int - self.p - (self.p - 1) - self.x_int_indices[i] = xp.arange(self.n_int) + 2 * (n_lambda_int - self.p - (self.p - 1)) + self.x_int_indices[i] = np.arange(self.n_int) + 2 * (n_lambda_int - self.p - (self.p - 1)) self.coeffi_indices[i] = counter_coeffi counter_coeffi += 1 for j in range(2 * (self.p - 1) + 1): @@ -215,20 +213,20 @@ def __init__(self, spline_space, n_quad): # interior else: if self.p == 1: - self.int_global_N[i] = xp.arange(self.n_int_locbf_N) + i - self.int_global_D[i] = xp.arange(self.n_int_locbf_D) + i + self.int_global_N[i] = np.arange(self.n_int_locbf_N) + i + self.int_global_D[i] = np.arange(self.n_int_locbf_D) + i self.int_global_N[-1] = self.int_global_N[-2] self.int_global_D[-1] = self.int_global_D[-2] else: - self.int_global_N[i] = xp.arange(self.n_int_locbf_N) + i - (self.p - 1) - self.int_global_D[i] = xp.arange(self.n_int_locbf_D) + i - (self.p - 1) + 
self.int_global_N[i] = np.arange(self.n_int_locbf_N) + i - (self.p - 1) + self.int_global_D[i] = np.arange(self.n_int_locbf_D) + i - (self.p - 1) if self.p == 1: self.x_int_indices[i] = i else: - self.x_int_indices[i] = xp.arange(self.n_int) + 2 * (i - (self.p - 1)) + self.x_int_indices[i] = np.arange(self.n_int) + 2 * (i - (self.p - 1)) self.coeffi_indices[i] = self.p - 1 for j in range(2 * (self.p - 1) + 1): @@ -236,8 +234,8 @@ def __init__(self, spline_space, n_quad): # local coefficient index if self.p == 1: - self.int_loccof_N[i] = xp.array([0, 1]) - self.int_loccof_D[-1] = xp.array([1]) + self.int_loccof_N[i] = np.array([0, 1]) + self.int_loccof_D[-1] = np.array([1]) else: if i > 0: @@ -245,8 +243,8 @@ def __init__(self, spline_space, n_quad): k_glob_new = self.int_global_D[i, il] bol = k_glob_new == self.int_global_D[i - 1] - if xp.any(bol): - self.int_loccof_D[i, il] = self.int_loccof_D[i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.int_loccof_D[i, il] = self.int_loccof_D[i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int - self.p - (self.p - 2)) and (self.int_loccof_D[i, il] == 0): self.int_loccof_D[i, il] = self.int_add_D[counter_D] @@ -256,8 +254,8 @@ def __init__(self, spline_space, n_quad): k_glob_new = self.int_global_N[i, il] bol = k_glob_new == self.int_global_N[i - 1] - if xp.any(bol): - self.int_loccof_N[i, il] = self.int_loccof_N[i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.int_loccof_N[i, il] = self.int_loccof_N[i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int - self.p - (self.p - 2)) and (self.int_loccof_N[i, il] == 0): self.int_loccof_N[i, il] = self.int_add_N[counter_N] @@ -275,24 +273,24 @@ def __init__(self, spline_space, n_quad): # shift local coefficients --> global coefficients if self.p == 1: - self.int_shift_D = xp.arange(self.NbaseN) - (self.p - 1) - self.int_shift_N = xp.arange(self.NbaseN) - (self.p) + self.int_shift_D = np.arange(self.NbaseN) - (self.p - 1) + self.int_shift_N = np.arange(self.NbaseN) - (self.p) else: - self.int_shift_D = xp.arange(self.NbaseN) - (self.p - 2) - self.int_shift_N = xp.arange(self.NbaseN) - (self.p - 1) + self.int_shift_D = np.arange(self.NbaseN) - (self.p - 2) + self.int_shift_N = np.arange(self.NbaseN) - (self.p - 1) for i in range(n_lambda_int): # global indices of non-vanishing basis functions and position of coefficients in final matrix - self.int_global_D[i] = (xp.arange(self.n_int_locbf_D) + i - (self.p - 1)) % self.NbaseD - self.int_loccof_D[i] = xp.arange(self.n_int_locbf_D - 1, -1, -1) + self.int_global_D[i] = (np.arange(self.n_int_locbf_D) + i - (self.p - 1)) % self.NbaseD + self.int_loccof_D[i] = np.arange(self.n_int_locbf_D - 1, -1, -1) - self.int_global_N[i] = (xp.arange(self.n_int_locbf_N) + i - (self.p - 1)) % self.NbaseN - self.int_loccof_N[i] = xp.arange(self.n_int_locbf_N - 1, -1, -1) + self.int_global_N[i] = (np.arange(self.n_int_locbf_N) + i - (self.p - 1)) % self.NbaseN + self.int_loccof_N[i] = np.arange(self.n_int_locbf_N - 1, -1, -1) if self.p == 1: self.x_int_indices[i] = i else: - self.x_int_indices[i] = xp.arange(self.n_int) + 2 * (i - (self.p - 1)) + self.x_int_indices[i] = np.arange(self.n_int) + 2 * (i - (self.p - 1)) self.coeffi_indices[i] = 0 @@ -300,55 +298,55 @@ def __init__(self, spline_space, n_quad): self.x_int[i, j] = ((self.T[i + 1 + int(j / 2)] + self.T[i + 1 + int((j + 1) / 2)]) / 2) % 1.0 # set histopolation points, quadrature points and weights - n_lambda_his = xp.copy(self.NbaseD) # number of coefficients in space V1 + n_lambda_his = 
np.copy(self.NbaseD) # number of coefficients in space V1 self.n_his = 2 * self.p # number of histopolation intervals (2, 4, 6, 8, ...) self.n_his_locbf_N = 2 * self.p # number of non-vanishing N bf in histopolation interval (2, 4, 6, 8, ...) self.n_his_locbf_D = 2 * self.p - 1 # number of non-vanishing D bf in histopolation interval (2, 4, 6, 8, ...) - self.x_his = xp.zeros((n_lambda_his, self.n_his + 1), dtype=float) # histopolation boundaries + self.x_his = np.zeros((n_lambda_his, self.n_his + 1), dtype=float) # histopolation boundaries - self.his_global_N = xp.zeros((n_lambda_his, self.n_his_locbf_N), dtype=int) - self.his_global_D = xp.zeros((n_lambda_his, self.n_his_locbf_D), dtype=int) + self.his_global_N = np.zeros((n_lambda_his, self.n_his_locbf_N), dtype=int) + self.his_global_D = np.zeros((n_lambda_his, self.n_his_locbf_D), dtype=int) - self.his_loccof_N = xp.zeros((n_lambda_his, self.n_his_locbf_N), dtype=int) - self.his_loccof_D = xp.zeros((n_lambda_his, self.n_his_locbf_D), dtype=int) + self.his_loccof_N = np.zeros((n_lambda_his, self.n_his_locbf_N), dtype=int) + self.his_loccof_D = np.zeros((n_lambda_his, self.n_his_locbf_D), dtype=int) - self.x_his_indices = xp.zeros((n_lambda_his, self.n_his), dtype=int) + self.x_his_indices = np.zeros((n_lambda_his, self.n_his), dtype=int) - self.coeffh_indices = xp.zeros(n_lambda_his, dtype=int) + self.coeffh_indices = np.zeros(n_lambda_his, dtype=int) - if not self.bc: + if self.bc == False: # maximum number of non-vanishing coefficients self.n_his_nvcof_D = 3 * self.p - 2 self.n_his_nvcof_N = 3 * self.p - 1 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.his_add_D = xp.arange(self.n_his - 2) + 1 - self.his_add_N = xp.arange(self.n_his - 1) + 1 + self.his_add_D = np.arange(self.n_his - 2) + 1 + self.his_add_N = np.arange(self.n_his - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) - self.his_shift_D = xp.arange(self.NbaseD) - (self.p - 1) + self.his_shift_D = np.arange(self.NbaseD) - (self.p - 1) self.his_shift_D[: 2 * self.p - 1] = 0 self.his_shift_D[-(2 * self.p - 1) :] = self.his_shift_D[-(2 * self.p - 1)] # shift local coefficients --> global coefficients (N) - self.his_shift_N = xp.arange(self.NbaseN) - self.p + self.his_shift_N = np.arange(self.NbaseN) - self.p self.his_shift_N[: 2 * self.p] = 0 self.his_shift_N[-2 * self.p :] = self.his_shift_N[-2 * self.p] - counter_coeffh = xp.copy(self.p) + counter_coeffh = np.copy(self.p) for i in range(n_lambda_his): # left boundary region if i < self.p - 1: - self.his_global_N[i] = xp.arange(self.n_his_locbf_N) - self.his_global_D[i] = xp.arange(self.n_his_locbf_D) + self.his_global_N[i] = np.arange(self.n_his_locbf_N) + self.his_global_D[i] = np.arange(self.n_his_locbf_D) - self.x_his_indices[i] = xp.arange(self.n_his) + self.x_his_indices[i] = np.arange(self.n_his) self.coeffh_indices[i] = i for j in range(2 * self.p + 1): xi = self.p - 1 @@ -356,10 +354,10 @@ def __init__(self, spline_space, n_quad): # right boundary region elif i > n_lambda_his - self.p: - self.his_global_N[i] = xp.arange(self.n_his_locbf_N) + n_lambda_his - self.p - (self.p - 1) - self.his_global_D[i] = xp.arange(self.n_his_locbf_D) + n_lambda_his - self.p - (self.p - 1) + self.his_global_N[i] = np.arange(self.n_his_locbf_N) + n_lambda_his - self.p - (self.p - 1) + self.his_global_D[i] = np.arange(self.n_his_locbf_D) + n_lambda_his - self.p - (self.p - 1) - self.x_his_indices[i] = xp.arange(self.n_his) + 2 * 
(n_lambda_his - self.p - (self.p - 1)) + self.x_his_indices[i] = np.arange(self.n_his) + 2 * (n_lambda_his - self.p - (self.p - 1)) self.coeffh_indices[i] = counter_coeffh counter_coeffh += 1 for j in range(2 * self.p + 1): @@ -368,10 +366,10 @@ def __init__(self, spline_space, n_quad): # interior else: - self.his_global_N[i] = xp.arange(self.n_his_locbf_N) + i - (self.p - 1) - self.his_global_D[i] = xp.arange(self.n_his_locbf_D) + i - (self.p - 1) + self.his_global_N[i] = np.arange(self.n_his_locbf_N) + i - (self.p - 1) + self.his_global_D[i] = np.arange(self.n_his_locbf_D) + i - (self.p - 1) - self.x_his_indices[i] = xp.arange(self.n_his) + 2 * (i - (self.p - 1)) + self.x_his_indices[i] = np.arange(self.n_his) + 2 * (i - (self.p - 1)) self.coeffh_indices[i] = self.p - 1 for j in range(2 * self.p + 1): self.x_his[i, j] = (self.T[i + 1 + int(j / 2)] + self.T[i + 1 + int((j + 1) / 2)]) / 2 @@ -382,8 +380,8 @@ def __init__(self, spline_space, n_quad): k_glob_new = self.his_global_D[i, il] bol = k_glob_new == self.his_global_D[i - 1] - if xp.any(bol): - self.his_loccof_D[i, il] = self.his_loccof_D[i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_D[i, il] = self.his_loccof_D[i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his - self.p - (self.p - 2)) and (self.his_loccof_D[i, il] == 0): self.his_loccof_D[i, il] = self.his_add_D[counter_D] @@ -393,15 +391,15 @@ def __init__(self, spline_space, n_quad): k_glob_new = self.his_global_N[i, il] bol = k_glob_new == self.his_global_N[i - 1] - if xp.any(bol): - self.his_loccof_N[i, il] = self.his_loccof_N[i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_N[i, il] = self.his_loccof_N[i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his - self.p - (self.p - 2)) and (self.his_loccof_N[i, il] == 0): self.his_loccof_N[i, il] = self.his_add_N[counter_N] counter_N += 1 # quadrature points and weights - self.pts, self.wts = bsp.quadrature_grid(xp.unique(self.x_his.flatten()), self.pts_loc, self.wts_loc) + self.pts, self.wts = bsp.quadrature_grid(np.unique(self.x_his.flatten()), self.pts_loc, self.wts_loc) else: # maximum number of non-vanishing coefficients @@ -409,33 +407,31 @@ def __init__(self, spline_space, n_quad): self.n_his_nvcof_N = 2 * self.p # shift local coefficients --> global coefficients - self.his_shift_D = xp.arange(self.NbaseD) - (self.p - 1) - self.his_shift_N = xp.arange(self.NbaseD) - self.p + self.his_shift_D = np.arange(self.NbaseD) - (self.p - 1) + self.his_shift_N = np.arange(self.NbaseD) - self.p for i in range(n_lambda_his): - self.his_global_N[i] = (xp.arange(self.n_his_locbf_N) + i - (self.p - 1)) % self.NbaseN - self.his_global_D[i] = (xp.arange(self.n_his_locbf_D) + i - (self.p - 1)) % self.NbaseD - self.his_loccof_N[i] = xp.arange(self.n_his_locbf_N - 1, -1, -1) - self.his_loccof_D[i] = xp.arange(self.n_his_locbf_D - 1, -1, -1) + self.his_global_N[i] = (np.arange(self.n_his_locbf_N) + i - (self.p - 1)) % self.NbaseN + self.his_global_D[i] = (np.arange(self.n_his_locbf_D) + i - (self.p - 1)) % self.NbaseD + self.his_loccof_N[i] = np.arange(self.n_his_locbf_N - 1, -1, -1) + self.his_loccof_D[i] = np.arange(self.n_his_locbf_D - 1, -1, -1) - self.x_his_indices[i] = xp.arange(self.n_his) + 2 * (i - (self.p - 1)) + self.x_his_indices[i] = np.arange(self.n_his) + 2 * (i - (self.p - 1)) self.coeffh_indices[i] = 0 for j in range(2 * self.p + 1): self.x_his[i, j] = (self.T[i + 1 + int(j / 2)] + self.T[i + 1 + int((j + 1) / 2)]) / 2 # quadrature points and weights self.pts, self.wts 
= bsp.quadrature_grid( - xp.append(xp.unique(self.x_his.flatten() % 1.0), 1.0), - self.pts_loc, - self.wts_loc, + np.append(np.unique(self.x_his.flatten() % 1.0), 1.0), self.pts_loc, self.wts_loc ) # quasi interpolation def pi_0(self, fun): - lambdas = xp.zeros(self.NbaseN, dtype=float) + lambdas = np.zeros(self.NbaseN, dtype=float) # evaluate function at interpolation points - mat_f = fun(xp.unique(self.x_int.flatten())) + mat_f = fun(np.unique(self.x_int.flatten())) for i in range(self.NbaseN): for j in range(self.n_int): @@ -445,7 +441,7 @@ def pi_0(self, fun): # quasi histopolation def pi_1(self, fun): - lambdas = xp.zeros(self.NbaseD, dtype=float) + lambdas = np.zeros(self.NbaseD, dtype=float) # evaluate function at quadrature points mat_f = fun(self.pts) @@ -463,17 +459,17 @@ def pi_1(self, fun): # projection matrices of products of basis functions: pi0_i(A_j*B_k) and pi1_i(A_j*B_k) def projection_matrices_1d(self, bc_kind=["free", "free"]): - PI0_NN = xp.empty((self.NbaseN, self.NbaseN, self.NbaseN), dtype=float) - PI0_DN = xp.empty((self.NbaseN, self.NbaseD, self.NbaseN), dtype=float) - PI0_DD = xp.empty((self.NbaseN, self.NbaseD, self.NbaseD), dtype=float) + PI0_NN = np.empty((self.NbaseN, self.NbaseN, self.NbaseN), dtype=float) + PI0_DN = np.empty((self.NbaseN, self.NbaseD, self.NbaseN), dtype=float) + PI0_DD = np.empty((self.NbaseN, self.NbaseD, self.NbaseD), dtype=float) - PI1_NN = xp.empty((self.NbaseD, self.NbaseN, self.NbaseN), dtype=float) - PI1_DN = xp.empty((self.NbaseD, self.NbaseD, self.NbaseN), dtype=float) - PI1_DD = xp.empty((self.NbaseD, self.NbaseD, self.NbaseD), dtype=float) + PI1_NN = np.empty((self.NbaseD, self.NbaseN, self.NbaseN), dtype=float) + PI1_DN = np.empty((self.NbaseD, self.NbaseD, self.NbaseN), dtype=float) + PI1_DD = np.empty((self.NbaseD, self.NbaseD, self.NbaseD), dtype=float) # ========= PI0__NN and PI1_NN ============= - ci = xp.zeros(self.NbaseN, dtype=float) - cj = xp.zeros(self.NbaseN, dtype=float) + ci = np.zeros(self.NbaseN, dtype=float) + cj = np.zeros(self.NbaseN, dtype=float) for i in range(self.NbaseN): for j in range(self.NbaseN): @@ -489,8 +485,8 @@ def projection_matrices_1d(self, bc_kind=["free", "free"]): PI1_NN[:, i, j] = self.pi_1(fun) # ========= PI0__DN and PI1_DN ============= - ci = xp.zeros(self.NbaseD, dtype=float) - cj = xp.zeros(self.NbaseN, dtype=float) + ci = np.zeros(self.NbaseD, dtype=float) + cj = np.zeros(self.NbaseN, dtype=float) for i in range(self.NbaseD): for j in range(self.NbaseN): @@ -506,8 +502,8 @@ def projection_matrices_1d(self, bc_kind=["free", "free"]): PI1_DN[:, i, j] = self.pi_1(fun) # ========= PI0__DD and PI1_DD ============= - ci = xp.zeros(self.NbaseD, dtype=float) - cj = xp.zeros(self.NbaseD, dtype=float) + ci = np.zeros(self.NbaseD, dtype=float) + cj = np.zeros(self.NbaseD, dtype=float) for i in range(self.NbaseD): for j in range(self.NbaseD): @@ -522,8 +518,8 @@ def projection_matrices_1d(self, bc_kind=["free", "free"]): PI0_DD[:, i, j] = self.pi_0(fun) PI1_DD[:, i, j] = self.pi_1(fun) - PI0_ND = xp.transpose(PI0_DN, (0, 2, 1)) - PI1_ND = xp.transpose(PI1_DN, (0, 2, 1)) + PI0_ND = np.transpose(PI0_DN, (0, 2, 1)) + PI1_ND = np.transpose(PI1_DN, (0, 2, 1)) # remove contributions from first and last N-splines if bc_kind[0] == "dirichlet": @@ -548,25 +544,25 @@ def projection_matrices_1d(self, bc_kind=["free", "free"]): PI1_DN[:, :, -1] = 0.0 PI1_ND[:, -1, :] = 0.0 - PI0_NN_indices = xp.nonzero(PI0_NN) - PI0_DN_indices = xp.nonzero(PI0_DN) - PI0_ND_indices = xp.nonzero(PI0_ND) - PI0_DD_indices = 
xp.nonzero(PI0_DD) + PI0_NN_indices = np.nonzero(PI0_NN) + PI0_DN_indices = np.nonzero(PI0_DN) + PI0_ND_indices = np.nonzero(PI0_ND) + PI0_DD_indices = np.nonzero(PI0_DD) - PI1_NN_indices = xp.nonzero(PI1_NN) - PI1_DN_indices = xp.nonzero(PI1_DN) - PI1_ND_indices = xp.nonzero(PI1_ND) - PI1_DD_indices = xp.nonzero(PI1_DD) + PI1_NN_indices = np.nonzero(PI1_NN) + PI1_DN_indices = np.nonzero(PI1_DN) + PI1_ND_indices = np.nonzero(PI1_ND) + PI1_DD_indices = np.nonzero(PI1_DD) - PI0_NN_indices = xp.vstack((PI0_NN_indices[0], PI0_NN_indices[1], PI0_NN_indices[2])) - PI0_DN_indices = xp.vstack((PI0_DN_indices[0], PI0_DN_indices[1], PI0_DN_indices[2])) - PI0_ND_indices = xp.vstack((PI0_ND_indices[0], PI0_ND_indices[1], PI0_ND_indices[2])) - PI0_DD_indices = xp.vstack((PI0_DD_indices[0], PI0_DD_indices[1], PI0_DD_indices[2])) + PI0_NN_indices = np.vstack((PI0_NN_indices[0], PI0_NN_indices[1], PI0_NN_indices[2])) + PI0_DN_indices = np.vstack((PI0_DN_indices[0], PI0_DN_indices[1], PI0_DN_indices[2])) + PI0_ND_indices = np.vstack((PI0_ND_indices[0], PI0_ND_indices[1], PI0_ND_indices[2])) + PI0_DD_indices = np.vstack((PI0_DD_indices[0], PI0_DD_indices[1], PI0_DD_indices[2])) - PI1_NN_indices = xp.vstack((PI1_NN_indices[0], PI1_NN_indices[1], PI1_NN_indices[2])) - PI1_DN_indices = xp.vstack((PI1_DN_indices[0], PI1_DN_indices[1], PI1_DN_indices[2])) - PI1_ND_indices = xp.vstack((PI1_ND_indices[0], PI1_ND_indices[1], PI1_ND_indices[2])) - PI1_DD_indices = xp.vstack((PI1_DD_indices[0], PI1_DD_indices[1], PI1_DD_indices[2])) + PI1_NN_indices = np.vstack((PI1_NN_indices[0], PI1_NN_indices[1], PI1_NN_indices[2])) + PI1_DN_indices = np.vstack((PI1_DN_indices[0], PI1_DN_indices[1], PI1_DN_indices[2])) + PI1_ND_indices = np.vstack((PI1_ND_indices[0], PI1_ND_indices[1], PI1_ND_indices[2])) + PI1_DD_indices = np.vstack((PI1_DD_indices[0], PI1_DD_indices[1], PI1_DD_indices[2])) return ( PI0_NN, @@ -621,87 +617,87 @@ def __init__(self, tensor_space, n_quad): self.polar = False # local projectors for polar splines are not implemented yet # Gauss - Legendre quadrature points and weights in (-1, 1) - self.pts_loc = [xp.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] - self.wts_loc = [xp.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] + self.pts_loc = [np.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] + self.wts_loc = [np.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] # set interpolation and histopolation coefficients self.coeff_i = [0, 0, 0] self.coeff_h = [0, 0, 0] for a in range(3): if self.bc[a]: - self.coeff_i[a] = xp.zeros((1, 2 * self.p[a] - 1), dtype=float) - self.coeff_h[a] = xp.zeros((1, 2 * self.p[a]), dtype=float) + self.coeff_i[a] = np.zeros((1, 2 * self.p[a] - 1), dtype=float) + self.coeff_h[a] = np.zeros((1, 2 * self.p[a]), dtype=float) if self.p[a] == 1: - self.coeff_i[a][0, :] = xp.array([1.0]) - self.coeff_h[a][0, :] = xp.array([1.0, 1.0]) + self.coeff_i[a][0, :] = np.array([1.0]) + self.coeff_h[a][0, :] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][0, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_h[a][0, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_i[a][0, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_h[a][0, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) elif self.p[a] == 3: - self.coeff_i[a][0, :] = 1 / 6
* np.array([1.0, -8.0, 20.0, -8.0, 1.0]) + self.coeff_h[a][0, :] = 1 / 6 * np.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) elif self.p[a] == 4: - self.coeff_i[a][0, :] = 2 / 45 * xp.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) + self.coeff_i[a][0, :] = 2 / 45 * np.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0]) self.coeff_h[a][0, :] = ( - 2 / 45 * xp.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) + 2 / 45 * np.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) ) else: print("degree > 4 not implemented!") else: - self.coeff_i[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) - self.coeff_h[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) + self.coeff_i[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) + self.coeff_h[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) if self.p[a] == 1: - self.coeff_i[a][0, :] = xp.array([1.0]) - self.coeff_h[a][0, :] = xp.array([1.0, 1.0]) + self.coeff_i[a][0, :] = np.array([1.0]) + self.coeff_h[a][0, :] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][0, :] = 1 / 2 * xp.array([2.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, 2.0]) + self.coeff_i[a][0, :] = 1 / 2 * np.array([2.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 2 * np.array([0.0, 0.0, 2.0]) - self.coeff_h[a][0, :] = 1 / 2 * xp.array([3.0, -1.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) - self.coeff_h[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, -1.0, 3.0]) + self.coeff_h[a][0, :] = 1 / 2 * np.array([3.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_h[a][2, :] = 1 / 2 * np.array([0.0, 0.0, -1.0, 3.0]) elif self.p[a] == 3: - self.coeff_i[a][0, :] = 1 / 18 * xp.array([18.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 18 * xp.array([-5.0, 40.0, -24.0, 8.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 18 * xp.array([3.0, -24.0, 60.0, -24.0, 3.0]) - self.coeff_i[a][3, :] = 1 / 18 * xp.array([-1.0, 8.0, -24.0, 40.0, -5.0]) - self.coeff_i[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, 0.0, 0.0, 18.0]) - - self.coeff_h[a][0, :] = 1 / 18 * xp.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 18 * xp.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 18 * xp.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) - self.coeff_h[a][3, :] = 1 / 18 * xp.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) - self.coeff_h[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) + self.coeff_i[a][0, :] = 1 / 18 * np.array([18.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 18 * np.array([-5.0, 40.0, -24.0, 8.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 18 * np.array([3.0, -24.0, 60.0, -24.0, 3.0]) + self.coeff_i[a][3, :] = 1 / 18 * np.array([-1.0, 8.0, -24.0, 40.0, -5.0]) + self.coeff_i[a][4, :] = 1 / 18 * np.array([0.0, 0.0, 0.0, 0.0, 18.0]) + + self.coeff_h[a][0, :] = 1 / 18 * np.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 18 * np.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 18 * np.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) + self.coeff_h[a][3, :] = 1 / 18 * np.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) + self.coeff_h[a][4, :] = 1 / 18 * np.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) elif self.p[a] == 4: - self.coeff_i[a][0, :] = 1 / 360 * xp.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0]) - self.coeff_i[a][1, :] = 1 / 360 * xp.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) - self.coeff_i[a][2, :] = 1 / 360 * xp.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) - self.coeff_i[a][3, :] = 1 / 360 * xp.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) - self.coeff_i[a][4, :] = 1 / 360 * xp.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) - self.coeff_i[a][5, :] = 1 / 360 * xp.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) - self.coeff_i[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) - - self.coeff_h[a][0, :] = 1 / 360 * xp.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 360 * xp.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 360 * xp.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) + self.coeff_i[a][0, :] = 1 / 360 * np.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 360 * np.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) + self.coeff_i[a][2, :] = 1 / 360 * np.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) + self.coeff_i[a][3, :] = 1 / 360 * np.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) + self.coeff_i[a][4, :] = 1 / 360 * np.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) + self.coeff_i[a][5, :] = 1 / 360 * np.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) + self.coeff_i[a][6, :] = 1 / 360 * np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) + + self.coeff_h[a][0, :] = 1 / 360 * np.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 360 * np.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 360 * np.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) self.coeff_h[a][3, :] = ( - 1 / 360 * xp.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) + 1 / 360 * np.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) ) - self.coeff_h[a][4, :] = 1 / 360 * xp.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) - self.coeff_h[a][5, :] = 1 / 360 * xp.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) - self.coeff_h[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) + self.coeff_h[a][4, :] = 1 / 360 * np.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) + self.coeff_h[a][5, :] = 1 / 360 * np.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) + self.coeff_h[a][6, :] = 1 / 360 * np.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) else: print("degree > 4 not implemented!") @@ -727,31 +723,31 @@ def __init__(self, tensor_space, n_quad): ) # number of non-vanishing D bf in interpolation interval (1, 2, 4, 6) self.x_int = [ - xp.zeros((n_lambda_int, n_int), dtype=float) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) + np.zeros((n_lambda_int, n_int), dtype=float) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) ] self.int_global_N = [ - xp.zeros((n_lambda_int, n_int_locbf_N), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_N), dtype=int) for n_lambda_int, n_int_locbf_N in zip(n_lambda_int, self.n_int_locbf_N) ] self.int_global_D = [ - xp.zeros((n_lambda_int, n_int_locbf_D), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_D), dtype=int) for n_lambda_int, n_int_locbf_D in zip(n_lambda_int, self.n_int_locbf_D) ] self.int_loccof_N = [ - xp.zeros((n_lambda_int, n_int_locbf_N), dtype=int) + 
np.zeros((n_lambda_int, n_int_locbf_N), dtype=int) for n_lambda_int, n_int_locbf_N in zip(n_lambda_int, self.n_int_locbf_N) ] self.int_loccof_D = [ - xp.zeros((n_lambda_int, n_int_locbf_D), dtype=int) + np.zeros((n_lambda_int, n_int_locbf_D), dtype=int) for n_lambda_int, n_int_locbf_D in zip(n_lambda_int, self.n_int_locbf_D) ] self.x_int_indices = [ - xp.zeros((n_lambda_int, n_int), dtype=int) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) + np.zeros((n_lambda_int, n_int), dtype=int) for n_lambda_int, n_int in zip(n_lambda_int, self.n_int) ] - self.coeffi_indices = [xp.zeros(n_lambda_int, dtype=int) for n_lambda_int in n_lambda_int] + self.coeffi_indices = [np.zeros(n_lambda_int, dtype=int) for n_lambda_int in n_lambda_int] self.n_int_nvcof_D = [None, None, None] self.n_int_nvcof_N = [None, None, None] @@ -763,7 +759,7 @@ def __init__(self, tensor_space, n_quad): self.int_shift_N = [0, 0, 0] for a in range(3): if not self.bc[a]: # maximum number of non-vanishing coefficients if self.p[a] == 1: self.n_int_nvcof_D[a] = 2 @@ -774,39 +770,39 @@ def __init__(self, tensor_space, n_quad): self.n_int_nvcof_N[a] = 3 * self.p[a] - 2 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.int_add_D[a] = xp.arange(self.n_int[a] - 2) + 1 - self.int_add_N[a] = xp.arange(self.n_int[a] - 1) + 1 + self.int_add_D[a] = np.arange(self.n_int[a] - 2) + 1 + self.int_add_N[a] = np.arange(self.n_int[a] - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) if self.p[a] == 1: - self.int_shift_D[a] = xp.arange(self.NbaseD[a]) + self.int_shift_D[a] = np.arange(self.NbaseD[a]) else: - self.int_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 2) + self.int_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 2) self.int_shift_D[a][: 2 * self.p[a] - 2] = 0 self.int_shift_D[a][-(2 * self.p[a] - 2) :] = self.int_shift_D[a][-(2 * self.p[a] - 2)] # shift local coefficients --> global coefficients (N) if self.p[a] == 1: - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) self.int_shift_N[a][-1] = self.int_shift_N[a][-2] else: - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) self.int_shift_N[a][: 2 * self.p[a] - 1] = 0 self.int_shift_N[a][-(2 * self.p[a] - 1) :] = self.int_shift_N[a][-(2 * self.p[a] - 1)] - counter_coeffi = xp.copy(self.p[a]) + counter_coeffi = np.copy(self.p[a]) for i in range(n_lambda_int[a]): # left boundary region if i < self.p[a] - 1: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + self.x_int_indices[a][i] = np.arange(self.n_int[a]) self.coeffi_indices[a][i] = i for j in range(2 * (self.p[a] - 1) + 1): xi = self.p[a] - 1 @@ -817,13 +813,13 @@ def __init__(self, tensor_space, n_quad): # right boundary region elif i > n_lambda_int[a] - self.p[a]: self.int_global_N[a][i] = ( - xp.arange(self.n_int_locbf_N[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_int_locbf_N[a]) + n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) self.int_global_D[a][i] = ( - xp.arange(self.n_int_locbf_D[a]) +
n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + 2 * ( + self.x_int_indices[a][i] = np.arange(self.n_int[a]) + 2 * ( n_lambda_int[a] - self.p[a] - (self.p[a] - 1) ) self.coeffi_indices[a][i] = counter_coeffi @@ -837,20 +833,20 @@ def __init__(self, tensor_space, n_quad): # interior else: if self.p[a] == 1: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) + i - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + i + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + i + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) + i self.int_global_N[a][-1] = self.int_global_N[a][-2] self.int_global_D[a][-1] = self.int_global_D[a][-2] else: - self.int_global_N[a][i] = xp.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1) - self.int_global_D[a][i] = xp.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1) + self.int_global_N[a][i] = np.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1) + self.int_global_D[a][i] = np.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1) if self.p[a] == 1: self.x_int_indices[a][i] = i else: - self.x_int_indices[a][i] = xp.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1)) + self.x_int_indices[a][i] = np.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1)) self.coeffi_indices[a][i] = self.p[a] - 1 @@ -861,8 +857,8 @@ def __init__(self, tensor_space, n_quad): # local coefficient index if self.p[a] == 1: - self.int_loccof_N[a][i] = xp.array([0, 1]) - self.int_loccof_D[a][-1] = xp.array([1]) + self.int_loccof_N[a][i] = np.array([0, 1]) + self.int_loccof_D[a][-1] = np.array([1]) else: if i > 0: @@ -870,8 +866,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.int_global_D[a][i, il] bol = k_glob_new == self.int_global_D[a][i - 1] - if xp.any(bol): - self.int_loccof_D[a][i, il] = self.int_loccof_D[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.int_loccof_D[a][i, il] = self.int_loccof_D[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int[a] - self.p[a] - (self.p[a] - 2)) and ( self.int_loccof_D[a][i, il] == 0 @@ -883,8 +879,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.int_global_N[a][i, il] bol = k_glob_new == self.int_global_N[a][i - 1] - if xp.any(bol): - self.int_loccof_N[a][i, il] = self.int_loccof_N[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.int_loccof_N[a][i, il] = self.int_loccof_N[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_int[a] - self.p[a] - (self.p[a] - 2)) and ( self.int_loccof_N[a][i, il] == 0 @@ -904,24 +900,24 @@ def __init__(self, tensor_space, n_quad): # shift local coefficients --> global coefficients if self.p[a] == 1: - self.int_shift_D[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a]) + self.int_shift_D[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a]) else: - self.int_shift_D[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 2) - self.int_shift_N[a] = xp.arange(self.NbaseN[a]) - (self.p[a] - 1) + self.int_shift_D[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 2) + self.int_shift_N[a] = np.arange(self.NbaseN[a]) - (self.p[a] - 1) for i in range(n_lambda_int[a]): # global indices of non-vanishing basis functions and position of coefficients in final matrix - self.int_global_N[a][i] = (xp.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] - self.int_global_D[a][i] = (xp.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] 
+ self.int_global_N[a][i] = (np.arange(self.n_int_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] + self.int_global_D[a][i] = (np.arange(self.n_int_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] - self.int_loccof_N[a][i] = xp.arange(self.n_int_locbf_N[a] - 1, -1, -1) - self.int_loccof_D[a][i] = xp.arange(self.n_int_locbf_D[a] - 1, -1, -1) + self.int_loccof_N[a][i] = np.arange(self.n_int_locbf_N[a] - 1, -1, -1) + self.int_loccof_D[a][i] = np.arange(self.n_int_locbf_D[a] - 1, -1, -1) if self.p[a] == 1: self.x_int_indices[a][i] = i else: - self.x_int_indices[a][i] = (xp.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1))) % ( + self.x_int_indices[a][i] = (np.arange(self.n_int[a]) + 2 * (i - (self.p[a] - 1))) % ( 2 * self.Nel[a] ) @@ -933,38 +929,38 @@ def __init__(self, tensor_space, n_quad): ) % 1.0 # set histopolation points, quadrature points and weights - n_lambda_his = [xp.copy(NbaseD) for NbaseD in self.NbaseD] # number of coefficients in space V1 + n_lambda_his = [np.copy(NbaseD) for NbaseD in self.NbaseD] # number of coefficients in space V1 self.n_his = [2 * p for p in self.p] # number of histopolation intervals self.n_his_locbf_N = [2 * p for p in self.p] # number of non-vanishing N bf in histopolation interval self.n_his_locbf_D = [2 * p - 1 for p in self.p] # number of non-vanishing D bf in histopolation interval self.x_his = [ - xp.zeros((n_lambda_his, n_his + 1), dtype=float) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) + np.zeros((n_lambda_his, n_his + 1), dtype=float) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) ] self.his_global_N = [ - xp.zeros((n_lambda_his, n_his_locbf_N), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_N), dtype=int) for n_lambda_his, n_his_locbf_N in zip(n_lambda_his, self.n_his_locbf_N) ] self.his_global_D = [ - xp.zeros((n_lambda_his, n_his_locbf_D), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_D), dtype=int) for n_lambda_his, n_his_locbf_D in zip(n_lambda_his, self.n_his_locbf_D) ] self.his_loccof_N = [ - xp.zeros((n_lambda_his, n_his_locbf_N), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_N), dtype=int) for n_lambda_his, n_his_locbf_N in zip(n_lambda_his, self.n_his_locbf_N) ] self.his_loccof_D = [ - xp.zeros((n_lambda_his, n_his_locbf_D), dtype=int) + np.zeros((n_lambda_his, n_his_locbf_D), dtype=int) for n_lambda_his, n_his_locbf_D in zip(n_lambda_his, self.n_his_locbf_D) ] self.x_his_indices = [ - xp.zeros((n_lambda_his, n_his), dtype=int) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) + np.zeros((n_lambda_his, n_his), dtype=int) for n_lambda_his, n_his in zip(n_lambda_his, self.n_his) ] - self.coeffh_indices = [xp.zeros(n_lambda_his, dtype=int) for n_lambda_his in n_lambda_his] + self.coeffh_indices = [np.zeros(n_lambda_his, dtype=int) for n_lambda_his in n_lambda_his] self.pts = [0, 0, 0] self.wts = [0, 0, 0] @@ -979,37 +975,37 @@ def __init__(self, tensor_space, n_quad): self.his_shift_N = [0, 0, 0] for a in range(3): if not self.bc[a]: # maximum number of non-vanishing coefficients self.n_his_nvcof_D[a] = 3 * self.p[a] - 2 self.n_his_nvcof_N[a] = 3 * self.p[a] - 1 # shift in local coefficient indices at right boundary (only for non-periodic boundary conditions) - self.his_add_D[a] = xp.arange(self.n_his[a] - 2) + 1 - self.his_add_N[a] = xp.arange(self.n_his[a] - 1) + 1 + self.his_add_D[a] = np.arange(self.n_his[a] - 2) + 1 + self.his_add_N[a] = np.arange(self.n_his[a] - 1) + 1 counter_D = 0 counter_N = 0 # shift local coefficients --> global coefficients (D) -
self.his_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 1) + self.his_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 1) self.his_shift_D[a][: 2 * self.p[a] - 1] = 0 self.his_shift_D[a][-(2 * self.p[a] - 1) :] = self.his_shift_D[a][-(2 * self.p[a] - 1)] # shift local coefficients --> global coefficients (N) - self.his_shift_N[a] = xp.arange(self.NbaseN[a]) - self.p[a] + self.his_shift_N[a] = np.arange(self.NbaseN[a]) - self.p[a] self.his_shift_N[a][: 2 * self.p[a]] = 0 self.his_shift_N[a][-2 * self.p[a] :] = self.his_shift_N[a][-2 * self.p[a]] - counter_coeffh = xp.copy(self.p[a]) + counter_coeffh = np.copy(self.p[a]) for i in range(n_lambda_his[a]): # left boundary region if i < self.p[a] - 1: - self.his_global_N[a][i] = xp.arange(self.n_his_locbf_N[a]) - self.his_global_D[a][i] = xp.arange(self.n_his_locbf_D[a]) + self.his_global_N[a][i] = np.arange(self.n_his_locbf_N[a]) + self.his_global_D[a][i] = np.arange(self.n_his_locbf_D[a]) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + self.x_his_indices[a][i] = np.arange(self.n_his[a]) self.coeffh_indices[a][i] = i for j in range(2 * self.p[a] + 1): xi = self.p[a] - 1 @@ -1020,13 +1016,13 @@ def __init__(self, tensor_space, n_quad): # right boundary region elif i > n_lambda_his[a] - self.p[a]: self.his_global_N[a][i] = ( - xp.arange(self.n_his_locbf_N[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_his_locbf_N[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) self.his_global_D[a][i] = ( - xp.arange(self.n_his_locbf_D[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) + np.arange(self.n_his_locbf_D[a]) + n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + 2 * ( + self.x_his_indices[a][i] = np.arange(self.n_his[a]) + 2 * ( n_lambda_his[a] - self.p[a] - (self.p[a] - 1) ) self.coeffh_indices[a][i] = counter_coeffh @@ -1039,10 +1035,10 @@ def __init__(self, tensor_space, n_quad): # interior else: - self.his_global_N[a][i] = xp.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1) - self.his_global_D[a][i] = xp.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1) + self.his_global_N[a][i] = np.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1) + self.his_global_D[a][i] = np.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1) - self.x_his_indices[a][i] = xp.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1)) + self.x_his_indices[a][i] = np.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1)) self.coeffh_indices[a][i] = self.p[a] - 1 for j in range(2 * self.p[a] + 1): self.x_his[a][i, j] = ( @@ -1055,8 +1051,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.his_global_D[a][i, il] bol = k_glob_new == self.his_global_D[a][i - 1] - if xp.any(bol): - self.his_loccof_D[a][i, il] = self.his_loccof_D[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_D[a][i, il] = self.his_loccof_D[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his[a] - self.p[a] - (self.p[a] - 2)) and ( self.his_loccof_D[a][i, il] == 0 @@ -1068,8 +1064,8 @@ def __init__(self, tensor_space, n_quad): k_glob_new = self.his_global_N[a][i, il] bol = k_glob_new == self.his_global_N[a][i - 1] - if xp.any(bol): - self.his_loccof_N[a][i, il] = self.his_loccof_N[a][i - 1, xp.where(bol)[0][0]] + 1 + if np.any(bol): + self.his_loccof_N[a][i, il] = self.his_loccof_N[a][i - 1, np.where(bol)[0][0]] + 1 if (k_glob_new >= n_lambda_his[a] - self.p[a] - (self.p[a] - 2)) and ( self.his_loccof_N[a][i, il] == 0 @@ -1079,9 +1075,7 @@ def __init__(self, tensor_space, 
n_quad): # quadrature points and weights self.pts[a], self.wts[a] = bsp.quadrature_grid( - xp.unique(self.x_his[a].flatten()), - self.pts_loc[a], - self.wts_loc[a], + np.unique(self.x_his[a].flatten()), self.pts_loc[a], self.wts_loc[a] ) else: @@ -1090,18 +1084,18 @@ def __init__(self, tensor_space, n_quad): self.n_his_nvcof_N[a] = 2 * self.p[a] # shift local coefficients --> global coefficients (D) - self.his_shift_D[a] = xp.arange(self.NbaseD[a]) - (self.p[a] - 1) + self.his_shift_D[a] = np.arange(self.NbaseD[a]) - (self.p[a] - 1) # shift local coefficients --> global coefficients (N) - self.his_shift_N[a] = xp.arange(self.NbaseD[a]) - self.p[a] + self.his_shift_N[a] = np.arange(self.NbaseD[a]) - self.p[a] for i in range(n_lambda_his[a]): - self.his_global_N[a][i] = (xp.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] - self.his_global_D[a][i] = (xp.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] - self.his_loccof_N[a][i] = xp.arange(self.n_his_locbf_N[a] - 1, -1, -1) - self.his_loccof_D[a][i] = xp.arange(self.n_his_locbf_D[a] - 1, -1, -1) + self.his_global_N[a][i] = (np.arange(self.n_his_locbf_N[a]) + i - (self.p[a] - 1)) % self.NbaseN[a] + self.his_global_D[a][i] = (np.arange(self.n_his_locbf_D[a]) + i - (self.p[a] - 1)) % self.NbaseD[a] + self.his_loccof_N[a][i] = np.arange(self.n_his_locbf_N[a] - 1, -1, -1) + self.his_loccof_D[a][i] = np.arange(self.n_his_locbf_D[a] - 1, -1, -1) - self.x_his_indices[a][i] = (xp.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1))) % ( + self.x_his_indices[a][i] = (np.arange(self.n_his[a]) + 2 * (i - (self.p[a] - 1))) % ( 2 * self.Nel[a] ) self.coeffh_indices[a][i] = 0 @@ -1111,9 +1105,7 @@ def __init__(self, tensor_space, n_quad): # quadrature points and weights self.pts[a], self.wts[a] = bsp.quadrature_grid( - xp.append(xp.unique(self.x_his[a].flatten() % 1.0), 1.0), - self.pts_loc[a], - self.wts_loc[a], + np.append(np.unique(self.x_his[a].flatten() % 1.0), 1.0), self.pts_loc[a], self.wts_loc[a] ) # projector on space V0 (interpolation) @@ -1139,18 +1131,18 @@ def pi_0(self, fun, include_bc=True, eval_kind="meshgrid"): """ # interpolation points - x_int1 = xp.unique(self.x_int[0].flatten()) - x_int2 = xp.unique(self.x_int[1].flatten()) - x_int3 = xp.unique(self.x_int[2].flatten()) + x_int1 = np.unique(self.x_int[0].flatten()) + x_int2 = np.unique(self.x_int[1].flatten()) + x_int3 = np.unique(self.x_int[2].flatten()) # evaluation of function at interpolation points - mat_f = xp.empty((x_int1.size, x_int2.size, x_int3.size), dtype=float) + mat_f = np.empty((x_int1.size, x_int2.size, x_int3.size), dtype=float) # external function call if a callable is passed if callable(fun): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(x_int1, x_int2, x_int3, indexing="ij") + pts1, pts2, pts3 = np.meshgrid(x_int1, x_int2, x_int3, indexing="ij") mat_f[:, :, :] = fun(pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1169,7 +1161,7 @@ def pi_0(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # coefficients - lambdas = xp.zeros((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) + lambdas = np.zeros((self.NbaseN[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) ker_loc.kernel_pi0_3d( self.NbaseN, @@ -1212,20 +1204,20 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): """ # interpolation points - x_int1 = xp.unique(self.x_int[0].flatten()) - x_int2 = 
xp.unique(self.x_int[1].flatten()) - x_int3 = xp.unique(self.x_int[2].flatten()) + x_int1 = np.unique(self.x_int[0].flatten()) + x_int2 = np.unique(self.x_int[1].flatten()) + x_int3 = np.unique(self.x_int[2].flatten()) # ======== 1-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((self.pts[0].flatten().size, x_int2.size, x_int3.size), dtype=float) + mat_f = np.empty((self.pts[0].flatten().size, x_int2.size, x_int3.size), dtype=float) # external function call if a callable is passed if callable(fun[0]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(self.pts[0].flatten(), x_int2, x_int3, indexing="ij") + pts1, pts2, pts3 = np.meshgrid(self.pts[0].flatten(), x_int2, x_int3, indexing="ij") mat_f[:, :, :] = fun[0](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1244,7 +1236,7 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas1 = xp.zeros((self.NbaseD[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) + lambdas1 = np.zeros((self.NbaseD[0], self.NbaseN[1], self.NbaseN[2]), dtype=float) ker_loc.kernel_pi11_3d( [self.NbaseD[0], self.NbaseN[1], self.NbaseN[2]], @@ -1267,13 +1259,13 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): # ======== 2-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((x_int1.size, self.pts[1].flatten().size, x_int3.size), dtype=float) + mat_f = np.empty((x_int1.size, self.pts[1].flatten().size, x_int3.size), dtype=float) # external function call if a callable is passed if callable(fun[1]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(x_int1, self.pts[1].flatten(), x_int3, indexing="ij") + pts1, pts2, pts3 = np.meshgrid(x_int1, self.pts[1].flatten(), x_int3, indexing="ij") mat_f[:, :, :] = fun[1](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1292,7 +1284,7 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas2 = xp.zeros((self.NbaseN[0], self.NbaseD[1], self.NbaseN[2]), dtype=float) + lambdas2 = np.zeros((self.NbaseN[0], self.NbaseD[1], self.NbaseN[2]), dtype=float) ker_loc.kernel_pi12_3d( [self.NbaseN[0], self.NbaseD[1], self.NbaseN[2]], @@ -1315,13 +1307,13 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): # ======== 3-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((x_int1.size, x_int1.size, self.pts[2].flatten().size), dtype=float) + mat_f = np.empty((x_int1.size, x_int2.size, self.pts[2].flatten().size), dtype=float) # external function call if a callable is passed if callable(fun[2]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(x_int1, x_int2, self.pts[2].flatten(), indexing="ij") + pts1, pts2, pts3 = np.meshgrid(x_int1, x_int2, self.pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun[2](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1340,7 +1332,7 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas3 = xp.zeros((self.NbaseN[0], self.NbaseN[1],
self.NbaseD[2]), dtype=float) ker_loc.kernel_pi13_3d( [self.NbaseN[0], self.NbaseN[1], self.NbaseD[2]], @@ -1360,7 +1352,7 @@ def pi_1(self, fun, include_bc=True, eval_kind="meshgrid"): lambdas3, ) - return xp.concatenate((lambdas1.flatten(), lambdas2.flatten(), lambdas3.flatten())) + return np.concatenate((lambdas1.flatten(), lambdas2.flatten(), lambdas3.flatten())) # projector on space V1 ([inter, histo, histo], [histo, inter, histo], [histo, histo, inter]) def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): @@ -1385,20 +1377,20 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): """ # interpolation points - x_int1 = xp.unique(self.x_int[0].flatten()) - x_int2 = xp.unique(self.x_int[1].flatten()) - x_int3 = xp.unique(self.x_int[2].flatten()) + x_int1 = np.unique(self.x_int[0].flatten()) + x_int2 = np.unique(self.x_int[1].flatten()) + x_int3 = np.unique(self.x_int[2].flatten()) # ======== 1-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((x_int1.size, self.pts[1].flatten().size, self.pts[2].flatten().size), dtype=float) + mat_f = np.empty((x_int1.size, self.pts[1].flatten().size, self.pts[2].flatten().size), dtype=float) # external function call if a callable is passed if callable(fun[0]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(x_int1, self.pts[1].flatten(), self.pts[2].flatten(), indexing="ij") + pts1, pts2, pts3 = np.meshgrid(x_int1, self.pts[1].flatten(), self.pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun[0](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1417,7 +1409,7 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas1 = xp.zeros((self.NbaseN[0], self.NbaseD[1], self.NbaseD[2]), dtype=float) + lambdas1 = np.zeros((self.NbaseN[0], self.NbaseD[1], self.NbaseD[2]), dtype=float) ker_loc.kernel_pi21_3d( [self.NbaseN[0], self.NbaseD[1], self.NbaseD[2]], @@ -1435,11 +1427,7 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): self.wts[1], self.wts[2], mat_f.reshape( - x_int1.size, - self.pts[1].shape[0], - self.pts[1].shape[1], - self.pts[2].shape[0], - self.pts[2].shape[1], + x_int1.size, self.pts[1].shape[0], self.pts[1].shape[1], self.pts[2].shape[0], self.pts[2].shape[1] ), lambdas1, ) @@ -1447,13 +1435,13 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): # ======== 2-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((self.pts[0].flatten().size, x_int2.size, self.pts[2].flatten().size), dtype=float) + mat_f = np.empty((self.pts[0].flatten().size, x_int2.size, self.pts[2].flatten().size), dtype=float) # external function call if a callable is passed if callable(fun[1]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(self.pts[0].flatten(), x_int2, self.pts[2].flatten(), indexing="ij") + pts1, pts2, pts3 = np.meshgrid(self.pts[0].flatten(), x_int2, self.pts[2].flatten(), indexing="ij") mat_f[:, :, :] = fun[1](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1472,7 +1460,7 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas2 = xp.zeros((self.NbaseD[0], self.NbaseN[1], self.NbaseD[2]), dtype=float) + lambdas2 = np.zeros((self.NbaseD[0], self.NbaseN[1], self.NbaseD[2]), 
dtype=float) ker_loc.kernel_pi22_3d( [self.NbaseD[0], self.NbaseN[1], self.NbaseD[2]], @@ -1490,11 +1478,7 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): self.wts[0], self.wts[2], mat_f.reshape( - self.pts[0].shape[0], - self.pts[0].shape[1], - x_int2.size, - self.pts[2].shape[0], - self.pts[2].shape[1], + self.pts[0].shape[0], self.pts[0].shape[1], x_int2.size, self.pts[2].shape[0], self.pts[2].shape[1] ), lambdas2, ) @@ -1502,13 +1486,13 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): # ======== 3-component ======== # evaluation of function at interpolation/quadrature points - mat_f = xp.empty((self.pts[0].flatten().size, self.pts[1].flatten().size, x_int3.size), dtype=float) + mat_f = np.empty((self.pts[0].flatten().size, self.pts[1].flatten().size, x_int3.size), dtype=float) # external function call if a callable is passed if callable(fun[2]): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(self.pts[0].flatten(), self.pts[1].flatten(), x_int3, indexing="ij") + pts1, pts2, pts3 = np.meshgrid(self.pts[0].flatten(), self.pts[1].flatten(), x_int3, indexing="ij") mat_f[:, :, :] = fun[2](pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1527,7 +1511,7 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas3 = xp.zeros((self.NbaseD[0], self.NbaseD[1], self.NbaseN[2]), dtype=float) + lambdas3 = np.zeros((self.NbaseD[0], self.NbaseD[1], self.NbaseN[2]), dtype=float) ker_loc.kernel_pi23_3d( [self.NbaseD[0], self.NbaseD[1], self.NbaseN[2]], @@ -1545,16 +1529,12 @@ def pi_2(self, fun, include_bc=True, eval_kind="meshgrid"): self.wts[0], self.wts[1], mat_f.reshape( - self.pts[0].shape[0], - self.pts[0].shape[1], - self.pts[1].shape[0], - self.pts[1].shape[1], - x_int3.size, + self.pts[0].shape[0], self.pts[0].shape[1], self.pts[1].shape[0], self.pts[1].shape[1], x_int3.size ), lambdas3, ) - return xp.concatenate((lambdas1.flatten(), lambdas2.flatten(), lambdas3.flatten())) + return np.concatenate((lambdas1.flatten(), lambdas2.flatten(), lambdas3.flatten())) # projector on space V3 (histopolation) def pi_3(self, fun, include_bc=True, eval_kind="meshgrid"): @@ -1579,20 +1559,16 @@ def pi_3(self, fun, include_bc=True, eval_kind="meshgrid"): """ # evaluation of function at quadrature points - mat_f = xp.empty( - (self.pts[0].flatten().size, self.pts[1].flatten().size, self.pts[2].flatten().size), - dtype=float, + mat_f = np.empty( + (self.pts[0].flatten().size, self.pts[1].flatten().size, self.pts[2].flatten().size), dtype=float ) # external function call if a callable is passed if callable(fun): # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid( - self.pts[0].flatten(), - self.pts[1].flatten(), - self.pts[2].flatten(), - indexing="ij", + pts1, pts2, pts3 = np.meshgrid( + self.pts[0].flatten(), self.pts[1].flatten(), self.pts[2].flatten(), indexing="ij" ) mat_f[:, :, :] = fun(pts1, pts2, pts3) @@ -1606,9 +1582,7 @@ def pi_3(self, fun, include_bc=True, eval_kind="meshgrid"): for i2 in range(self.pts[1].size): for i3 in range(self.pts[2].size): mat_f[i1, i2, i3] = fun( - self.pts[0].flatten()[i1], - self.pts[1].flatten()[i2], - self.pts[2].flatten()[i3], + self.pts[0].flatten()[i1], self.pts[1].flatten()[i2], self.pts[2].flatten()[i3] ) # internal function call @@ -1616,7 +1590,7 @@ def pi_3(self, fun, include_bc=True, 
eval_kind="meshgrid"): print("no internal 3D function implemented!") # compute coefficients - lambdas = xp.zeros((self.NbaseD[0], self.NbaseD[1], self.NbaseD[2]), dtype=float) + lambdas = np.zeros((self.NbaseD[0], self.NbaseD[1], self.NbaseD[2]), dtype=float) ker_loc.kernel_pi3_3d( self.NbaseD, diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_L2_projector_kernel.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_L2_projector_kernel.py index 0e711dbcf..9aa26b243 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_L2_projector_kernel.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_L2_projector_kernel.py @@ -1390,42 +1390,33 @@ def vv_1_form( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): @@ -1931,42 +1922,33 @@ def vv_push( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], 
num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_L2.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_L2.py index 137df7f09..e85dfaeb5 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_L2.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_L2.py @@ -5,12 +5,12 @@ Classes for local projectors in 1D and 3D based on quasi-spline interpolation and histopolation. 
""" -import cunumpy as xp import scipy.sparse as spa from psydac.ddm.mpi import mpi as MPI import struphy.feec.bsplines as bsp import struphy.feec.projectors.shape_pro_local.shape_L2_projector_kernel as ker_loc +from struphy.utils.arrays import xp as np # ======================= 3d ==================================== @@ -50,48 +50,48 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): self.indD = tensor_space.indD self.polar = False # local projectors for polar splines are not implemented yet - self.lambdas_0 = xp.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) - self.potential_lambdas_0 = xp.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_0 = np.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) + self.potential_lambdas_0 = np.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_11 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_12 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_13 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_11 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_12 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_13 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_1_21 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_22 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_23 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_21 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_22 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_23 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_1_31 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_32 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_33 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_31 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_32 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_33 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_11 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_12 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_13 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_11 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_2_12 = np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_13 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_2_21 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_22 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_23 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_21 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_2_22 = np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_23 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_2_31 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_32 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_33 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_31 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + 
self.lambdas_2_32 = np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_33 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_3 = xp.zeros((NbaseD[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_3 = np.zeros((NbaseD[0], NbaseD[1], NbaseD[2]), dtype=float) self.p_size = p_size self.p_shape = p_shape - self.related = xp.zeros(3, dtype=int) + self.related = np.zeros(3, dtype=int) for a in range(3): - # self.related[a] = int(xp.floor(NbaseN[a]/2.0)) + # self.related[a] = int(np.floor(NbaseN[a]/2.0)) self.related[a] = int( - xp.floor((3 * int((self.p_size[a] * (self.p_shape[a] + 1)) * self.Nel[a] + 1) + 3 * self.p[a]) / 2.0), + np.floor((3 * int((self.p_size[a] * (self.p_shape[a] + 1)) * self.Nel[a] + 1) + 3 * self.p[a]) / 2.0) ) if (2 * self.related[a] + 1) > NbaseN[a]: - self.related[a] = int(xp.floor(NbaseN[a] / 2.0)) + self.related[a] = int(np.floor(NbaseN[a] / 2.0)) - self.kernel_0_loc = xp.zeros( + self.kernel_0_loc = np.zeros( ( NbaseN[0], NbaseN[1], @@ -103,7 +103,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_11_loc = xp.zeros( + self.kernel_1_11_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -114,7 +114,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_12_loc = xp.zeros( + self.kernel_1_12_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -125,7 +125,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_13_loc = xp.zeros( + self.kernel_1_13_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -137,7 +137,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_22_loc = xp.zeros( + self.kernel_1_22_loc = np.zeros( ( NbaseN[0], NbaseD[1], @@ -148,7 +148,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_23_loc = xp.zeros( + self.kernel_1_23_loc = np.zeros( ( NbaseN[0], NbaseD[1], @@ -160,7 +160,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_33_loc = xp.zeros( + self.kernel_1_33_loc = np.zeros( ( NbaseN[0], NbaseN[1], @@ -172,12 +172,12 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.right_loc_1 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.right_loc_2 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.right_loc_3 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.right_loc_1 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.right_loc_2 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.right_loc_3 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) if self.mpi_rank == 0: - self.kernel_0 = xp.zeros( + self.kernel_0 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -189,7 +189,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_11 = xp.zeros( + self.kernel_1_11 = np.zeros( ( NbaseD[0], NbaseN[1], @@ -200,7 +200,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_12 = xp.zeros( + self.kernel_1_12 = np.zeros( ( NbaseN[0], NbaseD[1], @@ -211,7 +211,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_13 = xp.zeros( + self.kernel_1_13 = np.zeros( ( NbaseN[0], NbaseN[1], @@ 
-223,7 +223,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_22 = xp.zeros( + self.kernel_1_22 = np.zeros( ( NbaseN[0], NbaseD[1], @@ -234,7 +234,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): ), dtype=float, ) - self.kernel_1_23 = xp.zeros( + self.kernel_1_23 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -246,7 +246,7 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.kernel_1_33 = xp.zeros( + self.kernel_1_33 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -258,9 +258,9 @@ def __init__(self, tensor_space, p_shape, p_size, NbaseN, NbaseD, mpi_comm): dtype=float, ) - self.right_1 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.right_2 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.right_3 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.right_1 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.right_2 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.right_3 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) else: self.kernel_0 = None @@ -301,11 +301,11 @@ def assemble_0_form(self, tensor_space_FEM, mpi_comm): Nj = tensor_space_FEM.Nbase_0form # conversion to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -316,8 +316,7 @@ def assemble_0_form(self, tensor_space_FEM, mpi_comm): col = Nj[1] * Ni[2] * col1 + Ni[2] * col2 + col3 M = spa.csr_matrix( - (self.kernel_0.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_0.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M.eliminate_zeros() @@ -359,11 +358,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -374,8 +373,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M11 = spa.csr_matrix( - (self.kernel_1_11.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_11.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M11.eliminate_zeros() @@ -386,11 +384,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = 
[xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -401,8 +399,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M12 = spa.csr_matrix( - (self.kernel_1_12.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_12.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M12.eliminate_zeros() @@ -413,11 +410,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -428,8 +425,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M13 = spa.csr_matrix( - (self.kernel_1_13.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_13.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M13.eliminate_zeros() @@ -440,11 +436,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -455,8 +451,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M22 = spa.csr_matrix( - (self.kernel_1_22.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_22.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M22.eliminate_zeros() @@ -467,11 +462,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -482,8 +477,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M23 = spa.csr_matrix( - (self.kernel_1_23.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_23.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M23.eliminate_zeros() 
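[Editor's note] Every `assemble_*` hunk that this patch touches repeats one conversion pattern: a six-dimensional local kernel, indexed by three basis indices and three band offsets, is flattened into COO data, the global rows come from the first three axes, and the global columns come from the band offsets shifted by `self.related` with periodic wrap-around. A minimal, self-contained sketch of that pattern follows; the sizes, the square `Ni = Nj` assumption, the explicit `col1`/`col2`/`col3` broadcasting (elided in the hunks), and the use of plain `numpy` instead of the patch's `xp` backend alias are illustrative assumptions, not part of the patch.

```python
import numpy as np  # the patch itself would spell this: from struphy.utils.arrays import xp as np
import scipy.sparse as spa

Ni = (4, 5, 6)       # hypothetical number of basis functions per direction
related = (1, 1, 1)  # hypothetical interaction bandwidth per direction
band = tuple(2 * r + 1 for r in related)

kernel = np.ones(Ni + band)      # stands in for e.g. self.kernel_0
indices = np.indices(Ni + band)  # shape (6,) + Ni + band

# global row index: C-ordering of the three basis-index axes
row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten()

# band offset -> periodic global column index, one shift array per direction
shift = [np.arange(n) - r for n, r in zip(Ni, related)]
col1 = (indices[3] + shift[0][:, None, None, None, None, None]) % Ni[0]
col2 = (indices[4] + shift[1][:, None, None, None, None]) % Ni[1]
col3 = (indices[5] + shift[2][:, None, None, None]) % Ni[2]
col = Ni[1] * Ni[2] * col1 + Ni[2] * col2 + col3

M = spa.csr_matrix(
    (kernel.flatten(), (row, col.flatten())),
    shape=(np.prod(Ni), np.prod(Ni)),
)
M.eliminate_zeros()  # drop explicit zeros kept by the COO -> CSR conversion
```

One design note: the `(data, (row, col))` constructor sums duplicate entries, so band entries that wrap onto the same column accumulate correctly, which is why the code can afford a dense 6D local kernel and still end up with a sparse global operator.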
@@ -494,11 +488,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -509,15 +503,14 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M33 = spa.csr_matrix( - (self.kernel_1_33.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_33.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M33.eliminate_zeros() # final block matrix M = spa.bmat([[M11, M12, M13], [M12.T, M22, M23], [M13.T, M23.T, M33]], format="csr") # print('insider_check', self.kernel_1_33) - return (M, xp.concatenate((self.right_1.flatten(), self.right_2.flatten(), self.right_3.flatten()))) + return (M, np.concatenate((self.right_1.flatten(), self.right_2.flatten(), self.right_3.flatten()))) def heavy_test(self, test1, test2, test3, acc, particles_loc, Np, domain): ker_loc.kernel_1_heavy( @@ -590,7 +583,7 @@ def potential_pi_0(self, particles_loc, Np, domain, mpi_comm): ------- kernel_0 matrix """ if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.potential_kernel_0_form( Np, self.p, @@ -637,7 +630,7 @@ def S_pi_0(self, particles_loc, Np, domain): kernel_0 matrix """ self.kernel_0[:, :, :, :, :, :] = 0.0 if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.kernel_0_form( Np, self.p, @@ -699,7 +692,7 @@ def S_pi_1(self, particles_loc, Np, domain): self.right_loc_2[:, :, :] = 0.0 self.right_loc_3[:, :, :] = 0.0 if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.kernel_1_form( self.indN[0], self.indN[1], @@ -764,7 +757,7 @@ def S_pi_1(self, particles_loc, Np, domain): print("non-periodic case not implemented!!!") def vv_S1(self, particles_loc, Np, domain, index_label, accvv, dt, mpi_comm): if self.bc[0] and self.bc[1] and self.bc[2]: if index_label == 1: ker_loc.vv_1_form( self.wts[0][0], diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_local.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_local.py index 2ebb497a3..5951e835a 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_local.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_function_projectors_local.py @@ -5,12 +5,12 @@ Classes for local projectors in 1D and 3D based on quasi-spline interpolation and histopolation.
""" -import cunumpy as xp import scipy.sparse as spa from psydac.ddm.mpi import mpi as MPI import struphy.feec.bsplines as bsp import struphy.feec.projectors.shape_pro_local.shape_local_projector_kernel as ker_loc +from struphy.utils.arrays import xp as np # ======================= 3d ==================================== @@ -51,48 +51,48 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co self.polar = False # local projectors for polar splines are not implemented yet - self.lambdas_0 = xp.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) - self.potential_lambdas_0 = xp.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_0 = np.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) + self.potential_lambdas_0 = np.zeros((NbaseN[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_11 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_12 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_13 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_11 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_12 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_13 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_1_21 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_22 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_23 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_21 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_22 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_23 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_1_31 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.lambdas_1_32 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_1_33 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_1_31 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.lambdas_1_32 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_1_33 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_11 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_12 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_13 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_11 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_2_12 = np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_13 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_2_21 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_22 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_23 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_21 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_2_22 = np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_23 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_2_31 = xp.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) - self.lambdas_2_32 = xp.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) - self.lambdas_2_33 = xp.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) + self.lambdas_2_31 = np.zeros((NbaseN[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_2_32 = 
np.zeros((NbaseD[0], NbaseN[1], NbaseD[2]), dtype=float) + self.lambdas_2_33 = np.zeros((NbaseD[0], NbaseD[1], NbaseN[2]), dtype=float) - self.lambdas_3 = xp.zeros((NbaseD[0], NbaseD[1], NbaseD[2]), dtype=float) + self.lambdas_3 = np.zeros((NbaseD[0], NbaseD[1], NbaseD[2]), dtype=float) self.p_size = p_size self.p_shape = p_shape - self.related = xp.zeros(3, dtype=int) + self.related = np.zeros(3, dtype=int) for a in range(3): - # self.related[a] = int(xp.floor(NbaseN[a]/2.0)) + # self.related[a] = int(np.floor(NbaseN[a]/2.0)) self.related[a] = int( - xp.floor((3 * int((self.p_size[a] * (self.p_shape[a] + 1)) * self.Nel[a] + 1) + 3 * self.p[a]) / 2.0), + np.floor((3 * int((self.p_size[a] * (self.p_shape[a] + 1)) * self.Nel[a] + 1) + 3 * self.p[a]) / 2.0) ) if (2 * self.related[a] + 1) > NbaseN[a]: - self.related[a] = int(xp.floor(NbaseN[a] / 2.0)) + self.related[a] = int(np.floor(NbaseN[a] / 2.0)) - self.kernel_0_loc = xp.zeros( + self.kernel_0_loc = np.zeros( ( NbaseN[0], NbaseN[1], @@ -104,7 +104,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_11_loc = xp.zeros( + self.kernel_1_11_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -115,7 +115,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_12_loc = xp.zeros( + self.kernel_1_12_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -126,7 +126,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_13_loc = xp.zeros( + self.kernel_1_13_loc = np.zeros( ( NbaseD[0], NbaseN[1], @@ -138,7 +138,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_22_loc = xp.zeros( + self.kernel_1_22_loc = np.zeros( ( NbaseN[0], NbaseD[1], @@ -149,7 +149,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_23_loc = xp.zeros( + self.kernel_1_23_loc = np.zeros( ( NbaseN[0], NbaseD[1], @@ -161,7 +161,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_33_loc = xp.zeros( + self.kernel_1_33_loc = np.zeros( ( NbaseN[0], NbaseN[1], @@ -173,12 +173,12 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.right_loc_1 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.right_loc_2 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.right_loc_3 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.right_loc_1 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.right_loc_2 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.right_loc_3 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) if self.mpi_rank == 0: - self.kernel_0 = xp.zeros( + self.kernel_0 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -190,7 +190,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_11 = xp.zeros( + self.kernel_1_11 = np.zeros( ( NbaseD[0], NbaseN[1], @@ -201,7 +201,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_12 = xp.zeros( + self.kernel_1_12 = np.zeros( ( NbaseN[0], NbaseD[1], @@ -212,7 +212,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_13 = xp.zeros( + self.kernel_1_13 = np.zeros( ( 
NbaseN[0], NbaseN[1], @@ -224,7 +224,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_22 = xp.zeros( + self.kernel_1_22 = np.zeros( ( NbaseN[0], NbaseD[1], @@ -235,7 +235,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co ), dtype=float, ) - self.kernel_1_23 = xp.zeros( + self.kernel_1_23 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -247,7 +247,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.kernel_1_33 = xp.zeros( + self.kernel_1_33 = np.zeros( ( NbaseN[0], NbaseN[1], @@ -259,9 +259,9 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co dtype=float, ) - self.right_1 = xp.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) - self.right_2 = xp.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) - self.right_3 = xp.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) + self.right_1 = np.zeros((NbaseD[0], NbaseN[1], NbaseN[2]), dtype=float) + self.right_2 = np.zeros((NbaseN[0], NbaseD[1], NbaseN[2]), dtype=float) + self.right_3 = np.zeros((NbaseN[0], NbaseN[1], NbaseD[2]), dtype=float) else: self.kernel_0 = None @@ -279,7 +279,7 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co self.right_2 = None self.right_3 = None - self.num_cell = xp.empty(3, dtype=int) + self.num_cell = np.empty(3, dtype=int) for i in range(3): if self.p[i] == 1: self.num_cell[i] = 1 @@ -287,16 +287,14 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co self.num_cell[i] = 2 # Gauss - Legendre quadrature points and weights in (-1, 1) - self.pts_loc = [xp.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] - self.wts_loc = [xp.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] + self.pts_loc = [np.polynomial.legendre.leggauss(n_quad)[0] for n_quad in self.n_quad] + self.wts_loc = [np.polynomial.legendre.leggauss(n_quad)[1] for n_quad in self.n_quad] self.pts = [0, 0, 0] self.wts = [0, 0, 0] for a in range(3): self.pts[a], self.wts[a] = bsp.quadrature_grid( - [0, 1.0 / 2.0 / self.Nel[a]], - self.pts_loc[a], - self.wts_loc[a], + [0, 1.0 / 2.0 / self.Nel[a]], self.pts_loc[a], self.wts_loc[a] ) # print('check_pts', self.pts[0].shape, self.pts[1].shape, self.pts[2].shape) # print('check_pts', self.wts) @@ -304,79 +302,79 @@ def __init__(self, tensor_space, n_quad, p_shape, p_size, NbaseN, NbaseD, mpi_co self.coeff_i = [0, 0, 0] self.coeff_h = [0, 0, 0] for a in range(3): if self.bc[a]: - self.coeff_i[a] = xp.zeros(2 * self.p[a], dtype=float) - self.coeff_h[a] = xp.zeros(2 * self.p[a], dtype=float) + self.coeff_i[a] = np.zeros(2 * self.p[a], dtype=float) + self.coeff_h[a] = np.zeros(2 * self.p[a], dtype=float) if self.p[a] == 1: - self.coeff_i[a][:] = xp.array([1.0, 0.0]) - self.coeff_h[a][:] = xp.array([1.0, 1.0]) + self.coeff_i[a][:] = np.array([1.0, 0.0]) + self.coeff_h[a][:] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][:] = 1 / 2 * xp.array([-1.0, 4.0, -1.0, 0.0]) - self.coeff_h[a][:] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_i[a][:] = 1 / 2 * np.array([-1.0, 4.0, -1.0, 0.0]) + self.coeff_h[a][:] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) elif self.p[a] == 3: - self.coeff_i[a][:] = 1 / 6 * xp.array([1.0, -8.0, 20.0, -8.0, 1.0, 0.0]) - self.coeff_h[a][:] = 1 / 6 * xp.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) + self.coeff_i[a][:] = 1 / 6 * np.array([1.0, -8.0, 20.0, -8.0, 1.0, 0.0]) +
self.coeff_h[a][:] = 1 / 6 * np.array([1.0, -7.0, 12.0, 12.0, -7.0, 1.0]) elif self.p[a] == 4: - self.coeff_i[a][:] = 2 / 45 * xp.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0, 0.0]) + self.coeff_i[a][:] = 2 / 45 * np.array([-1.0, 16.0, -295 / 4, 140.0, -295 / 4, 16.0, -1.0, 0.0]) self.coeff_h[a][:] = ( - 2 / 45 * xp.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) + 2 / 45 * np.array([-1.0, 15.0, -231 / 4, 265 / 4, 265 / 4, -231 / 4, 15.0, -1.0]) ) else: print("degree > 4 not implemented!") else: - self.coeff_i[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) - self.coeff_h[a] = xp.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) + self.coeff_i[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a] - 1), dtype=float) + self.coeff_h[a] = np.zeros((2 * self.p[a] - 1, 2 * self.p[a]), dtype=float) if self.p[a] == 1: - self.coeff_i[a][0, :] = xp.array([1.0]) - self.coeff_h[a][0, :] = xp.array([1.0, 1.0]) + self.coeff_i[a][0, :] = np.array([1.0]) + self.coeff_h[a][0, :] = np.array([1.0, 1.0]) elif self.p[a] == 2: - self.coeff_i[a][0, :] = 1 / 2 * xp.array([2.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 2 * xp.array([-1.0, 4.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, 2.0]) + self.coeff_i[a][0, :] = 1 / 2 * np.array([2.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 2 * np.array([-1.0, 4.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 2 * np.array([0.0, 0.0, 2.0]) - self.coeff_h[a][0, :] = 1 / 2 * xp.array([3.0, -1.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 2 * xp.array([-1.0, 3.0, 3.0, -1.0]) - self.coeff_h[a][2, :] = 1 / 2 * xp.array([0.0, 0.0, -1.0, 3.0]) + self.coeff_h[a][0, :] = 1 / 2 * np.array([3.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 2 * np.array([-1.0, 3.0, 3.0, -1.0]) + self.coeff_h[a][2, :] = 1 / 2 * np.array([0.0, 0.0, -1.0, 3.0]) elif self.p[a] == 3: - self.coeff_i[a][0, :] = 1 / 18 * xp.array([18.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 18 * xp.array([-5.0, 40.0, -24.0, 8.0, -1.0]) - self.coeff_i[a][2, :] = 1 / 18 * xp.array([3.0, -24.0, 60.0, -24.0, 3.0]) - self.coeff_i[a][3, :] = 1 / 18 * xp.array([-1.0, 8.0, -24.0, 40.0, -5.0]) - self.coeff_i[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, 0.0, 0.0, 18.0]) - - self.coeff_h[a][0, :] = 1 / 18 * xp.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 18 * xp.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 18 * xp.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) - self.coeff_h[a][3, :] = 1 / 18 * xp.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) - self.coeff_h[a][4, :] = 1 / 18 * xp.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) + self.coeff_i[a][0, :] = 1 / 18 * np.array([18.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 18 * np.array([-5.0, 40.0, -24.0, 8.0, -1.0]) + self.coeff_i[a][2, :] = 1 / 18 * np.array([3.0, -24.0, 60.0, -24.0, 3.0]) + self.coeff_i[a][3, :] = 1 / 18 * np.array([-1.0, 8.0, -24.0, 40.0, -5.0]) + self.coeff_i[a][4, :] = 1 / 18 * np.array([0.0, 0.0, 0.0, 0.0, 18.0]) + + self.coeff_h[a][0, :] = 1 / 18 * np.array([23.0, -17.0, 7.0, -1.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 18 * np.array([-8.0, 56.0, -28.0, 4.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 18 * np.array([3.0, -21.0, 36.0, 36.0, -21.0, 3.0]) + self.coeff_h[a][3, :] = 1 / 18 * np.array([0.0, 0.0, 4.0, -28.0, 56.0, -8.0]) + self.coeff_h[a][4, :] = 1 / 18 * np.array([0.0, 0.0, -1.0, 7.0, -17.0, 23.0]) elif self.p[a] == 4: - self.coeff_i[a][0, :] = 1 / 360 * xp.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) - self.coeff_i[a][1, :] = 1 / 360 * 
xp.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) - self.coeff_i[a][2, :] = 1 / 360 * xp.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) - self.coeff_i[a][3, :] = 1 / 360 * xp.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) - self.coeff_i[a][4, :] = 1 / 360 * xp.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) - self.coeff_i[a][5, :] = 1 / 360 * xp.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) - self.coeff_i[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) - - self.coeff_h[a][0, :] = 1 / 360 * xp.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) - self.coeff_h[a][1, :] = 1 / 360 * xp.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) - self.coeff_h[a][2, :] = 1 / 360 * xp.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) + self.coeff_i[a][0, :] = 1 / 360 * np.array([360.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + self.coeff_i[a][1, :] = 1 / 360 * np.array([-59.0, 944.0, -1000.0, 720.0, -305.0, 64.0, -4.0]) + self.coeff_i[a][2, :] = 1 / 360 * np.array([23.0, -368.0, 1580.0, -1360.0, 605.0, -128.0, 8.0]) + self.coeff_i[a][3, :] = 1 / 360 * np.array([-16.0, 256.0, -1180.0, 2240.0, -1180.0, 256.0, -16.0]) + self.coeff_i[a][4, :] = 1 / 360 * np.array([8.0, -128.0, 605.0, -1360.0, 1580.0, -368.0, 23.0]) + self.coeff_i[a][5, :] = 1 / 360 * np.array([-4.0, 64.0, -305.0, 720.0, -1000.0, 944.0, -59.0]) + self.coeff_i[a][6, :] = 1 / 360 * np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 360.0]) + + self.coeff_h[a][0, :] = 1 / 360 * np.array([419.0, -525.0, 475.0, -245.0, 60.0, -4.0, 0.0, 0.0]) + self.coeff_h[a][1, :] = 1 / 360 * np.array([-82.0, 1230.0, -1350.0, 730.0, -180.0, 12.0, 0.0, 0.0]) + self.coeff_h[a][2, :] = 1 / 360 * np.array([39.0, -585.0, 2175.0, -1425.0, 360.0, -24.0, 0.0, 0.0]) self.coeff_h[a][3, :] = ( - 1 / 360 * xp.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) + 1 / 360 * np.array([-16.0, 240.0, -924.0, 1060.0, 1060.0, -924.0, 240.0, -16.0]) ) - self.coeff_h[a][4, :] = 1 / 360 * xp.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) - self.coeff_h[a][5, :] = 1 / 360 * xp.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) - self.coeff_h[a][6, :] = 1 / 360 * xp.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) + self.coeff_h[a][4, :] = 1 / 360 * np.array([0.0, 0.0, -24.0, 360.0, -1425.0, 2175.0, -585.0, 39.0]) + self.coeff_h[a][5, :] = 1 / 360 * np.array([0.0, 0.0, 12.0, -180.0, 730.0, -1350.0, 1230.0, -82.0]) + self.coeff_h[a][6, :] = 1 / 360 * np.array([0.0, 0.0, -4.0, 60.0, -245.0, 475.0, -525.0, 419.0]) else: print("degree > 4 not implemented!") @@ -404,11 +402,11 @@ def assemble_0_form(self, tensor_space_FEM, mpi_comm): Nj = tensor_space_FEM.Nbase_0form # conversion to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -419,8 +417,7 @@ def assemble_0_form(self, tensor_space_FEM, mpi_comm): col = Nj[1] * Ni[2] * col1 + Ni[2] * col2 + col3 M = spa.csr_matrix( - (self.kernel_0.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_0.flatten(), (row, 
col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M.eliminate_zeros() @@ -462,11 +459,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -477,8 +474,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M11 = spa.csr_matrix( - (self.kernel_1_11.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_11.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M11.eliminate_zeros() @@ -489,11 +485,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -504,8 +500,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M12 = spa.csr_matrix( - (self.kernel_1_12.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_12.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M12.eliminate_zeros() @@ -516,11 +511,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -531,8 +526,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M13 = spa.csr_matrix( - (self.kernel_1_13.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_13.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M13.eliminate_zeros() @@ -543,11 +537,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset 
in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -558,8 +552,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M22 = spa.csr_matrix( - (self.kernel_1_22.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_22.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M22.eliminate_zeros() @@ -570,11 +563,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -585,8 +578,7 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M23 = spa.csr_matrix( - (self.kernel_1_23.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_23.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M23.eliminate_zeros() @@ -597,11 +589,11 @@ def assemble_1_form(self, tensor_space_FEM): Nj = tensor_space_FEM.Nbase_1form[b] # convert to sparse matrix - indices = xp.indices( - (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1), + indices = np.indices( + (Ni[0], Ni[1], Ni[2], 2 * self.related[0] + 1, 2 * self.related[1] + 1, 2 * self.related[2] + 1) ) - shift = [xp.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] + shift = [np.arange(Ni) - offset for Ni, offset in zip(Ni, self.related)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() @@ -612,15 +604,14 @@ def assemble_1_form(self, tensor_space_FEM): col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3 M33 = spa.csr_matrix( - (self.kernel_1_33.flatten(), (row, col.flatten())), - shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]), + (self.kernel_1_33.flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]) ) M33.eliminate_zeros() # final block matrix M = spa.bmat([[M11, M12, M13], [M12.T, M22, M23], [M13.T, M23.T, M33]], format="csr") # print('insider_check', self.kernel_1_33) - return (M, xp.concatenate((self.right_1.flatten(), self.right_2.flatten(), self.right_3.flatten()))) + return (M, np.concatenate((self.right_1.flatten(), self.right_2.flatten(), self.right_3.flatten()))) def heavy_test(self, test1, test2, test3, acc, particles_loc, Np, domain): ker_loc.kernel_1_heavy( @@ -686,7 +677,7 @@ def potential_pi_0(self, particles_loc, Np, domain, mpi_comm): ------- kernel_0 matrix """ if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.potential_kernel_0_form( Np, self.p, @@ -733,7 +724,7 @@ def S_pi_0(self, particles_loc, Np, domain): kernel_0 matrix """ self.kernel_0[:, :, :, :, :, :] = 0.0 if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.kernel_0_form( Np, self.p,
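[Editor's note] In `assemble_1_form` above, only the blocks M11, M12, M13, M22, M23 and M33 are assembled from kernels; the lower triangle of the final operator is filled with their transposes, so the result is symmetric by construction. A small sketch of that layout (the block size and densities are made-up placeholders, not values from the patch):

```python
import scipy.sparse as spa

n = 8  # hypothetical size of one scalar block; Ni[0] * Ni[1] * Ni[2] in the patch
M11, M12, M13, M22, M23, M33 = (
    spa.random(n, n, density=0.2, format="csr") for _ in range(6)
)

# only the upper triangle is assembled explicitly; the lower triangle
# reuses the transposed off-diagonal blocks
M = spa.bmat(
    [[M11, M12, M13],
     [M12.T, M22, M23],
     [M13.T, M23.T, M33]],
    format="csr",
)
assert M.shape == (3 * n, 3 * n)
```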
@@ -795,7 +786,7 @@ def S_pi_1(self, particles_loc, Np, domain): self.right_loc_2[:, :, :] = 0.0 self.right_loc_3[:, :, :] = 0.0 if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.kernel_1_form( self.right_loc_1, self.right_loc_2, @@ -882,7 +873,7 @@ def S_pi_01(self, particles_loc, Np, domain): self.right_loc_2[:, :, :] = 0.0 self.right_loc_3[:, :, :] = 0.0 if self.bc[0] and self.bc[1] and self.bc[2]: ker_loc.kernel_01_form( self.right_loc_1, self.right_loc_2, @@ -933,7 +924,7 @@ def S_pi_01(self, particles_loc, Np, domain): print("non-periodic case not implemented!!!") def vv_S1(self, particles_loc, Np, domain, index_label, accvv, dt, mpi_comm): if self.bc[0] and self.bc[1] and self.bc[2]: if index_label == 1: ker_loc.vv_1_form( self.wts[0][0], diff --git a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_local_projector_kernel.py b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_local_projector_kernel.py index 6db315daa..c0ebc624d 100644 --- a/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_local_projector_kernel.py +++ b/src/struphy/eigenvalue_solvers/legacy/projectors_local/shape_pro_local/shape_local_projector_kernel.py @@ -108,8 +107,7 @@ def kernel_0_form( width[il1] = p[il1] + cell_number[il1] - 1 mat_f = empty( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], num_cell[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], num_cell[2]), dtype=float ) mat_f[:, :, :, :, :, :] = 0.0 @@ -336,8 +335,7 @@ def potential_kernel_0_form( width[il1] = p[il1] + cell_number[il1] - 1 mat_f = empty( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], num_cell[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], num_cell[2]), dtype=float ) mat_f[:, :, :, :, :, :] = 0.0 @@ -541,42 +539,33 @@ def kernel_1_form( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1],
cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): @@ -1270,42 +1259,33 @@ def bv_localproj_push( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): @@ -1825,42 +1805,33 @@ def kernel_1_heavy( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, 
num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): @@ -2402,42 +2373,33 @@ def vv_1_form( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float 
) for i1 in range(cell_number[0]): @@ -2943,42 +2905,33 @@ def vv_push( # evaluation of function at interpolation/quadrature points mat_11 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_21 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_31 = zeros( - (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], 2, num_cell[1], num_cell[2], quad[0]), dtype=float ) mat_12 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_22 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_32 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], 2, num_cell[2], quad[1]), dtype=float ) mat_13 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_23 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) mat_33 = zeros( - (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), - dtype=float, + (cell_number[0], cell_number[1], cell_number[2], num_cell[0], num_cell[1], 2, quad[2]), dtype=float ) for i1 in range(cell_number[0]): diff --git a/src/struphy/eigenvalue_solvers/mass_matrices_1d.py b/src/struphy/eigenvalue_solvers/mass_matrices_1d.py index b5013c088..99316215d 100644 --- a/src/struphy/eigenvalue_solvers/mass_matrices_1d.py +++ b/src/struphy/eigenvalue_solvers/mass_matrices_1d.py @@ -2,10 +2,10 @@ # # Copyright 2020 Florian Holderied -import cunumpy as xp import scipy.sparse as spa import struphy.bsplines.bsplines as bsp +from struphy.utils.arrays import xp as np # ======= mass matrices in 1D ==================== @@ -47,7 +47,7 @@ def get_M(spline_space, phi_i=0, phi_j=0, fun=None): # evaluation of weight function at quadrature points (optional) if fun == None: - mat_fun = xp.ones(pts.shape, dtype=float) + mat_fun = np.ones(pts.shape, dtype=float) else: mat_fun = fun(pts.flatten()).reshape(Nel, n_quad) @@ -74,7 +74,7 @@ def get_M(spline_space, phi_i=0, phi_j=0, fun=None): bj = basisD[:, :, 0, :] # matrix assembly - M = xp.zeros((Ni, 2 * p + 1), dtype=float) + M = np.zeros((Ni, 2 * p + 1), dtype=float) for ie in range(Nel): for il in range(p + 1 - ni): @@ -86,8 +86,8 @@ def get_M(spline_space, phi_i=0, phi_j=0, fun=None): M[(ie + il) % Ni, p + jl - il] += value - indices = xp.indices((Ni, 2 * p + 1)) - shift = xp.arange(Ni) - p + indices = np.indices((Ni, 2 * p + 1)) + shift = np.arange(Ni) - p row = indices[0].flatten() col = (indices[1] + shift[:, None]) % 
Nj @@ -137,13 +137,13 @@ def get_M_gen(spline_space, phi_i=0, phi_j=0, fun=None, jac=None): # evaluation of weight function at quadrature points (optional) if fun == None: - mat_fun = xp.ones(pts.shape, dtype=float) + mat_fun = np.ones(pts.shape, dtype=float) else: mat_fun = fun(pts.flatten()).reshape(Nel, n_quad) # evaluation of jacobian at quadrature points if jac == None: - mat_jac = xp.ones(pts.shape, dtype=float) + mat_jac = np.ones(pts.shape, dtype=float) else: mat_jac = jac(pts.flatten()).reshape(Nel, n_quad) @@ -180,7 +180,7 @@ def get_M_gen(spline_space, phi_i=0, phi_j=0, fun=None, jac=None): bj = basis_t[:, :, 0, :] # matrix assembly - M = xp.zeros((Ni, 2 * p + 1), dtype=float) + M = np.zeros((Ni, 2 * p + 1), dtype=float) for ie in range(Nel): for il in range(p + 1 - ni): @@ -192,8 +192,8 @@ def get_M_gen(spline_space, phi_i=0, phi_j=0, fun=None, jac=None): M[(ie + il) % Ni, p + jl - il] += value - indices = xp.indices((Ni, 2 * p + 1)) - shift = xp.arange(Ni) - p + indices = np.indices((Ni, 2 * p + 1)) + shift = np.arange(Ni) - p row = indices[0].flatten() col = (indices[1] + shift[:, None]) % Nj @@ -235,11 +235,11 @@ def test_M(spline_space, phi_i=0, phi_j=0, fun=lambda eta: 1.0, jac=lambda eta: bj = lambda eta: spline_space.evaluate_D(eta, cj) / spline_space.Nel # coefficients - ci = xp.zeros(Ni, dtype=float) - cj = xp.zeros(Nj, dtype=float) + ci = np.zeros(Ni, dtype=float) + cj = np.zeros(Nj, dtype=float) # integration - M = xp.zeros((Ni, Nj), dtype=float) + M = np.zeros((Ni, Nj), dtype=float) for i in range(Ni): for j in range(Nj): diff --git a/src/struphy/eigenvalue_solvers/mass_matrices_2d.py b/src/struphy/eigenvalue_solvers/mass_matrices_2d.py index c2c6c3ae9..e19b31ac6 100644 --- a/src/struphy/eigenvalue_solvers/mass_matrices_2d.py +++ b/src/struphy/eigenvalue_solvers/mass_matrices_2d.py @@ -2,10 +2,10 @@ # # Copyright 2020 Florian Holderied -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_2d as ker +from struphy.utils.arrays import xp as np # ================ mass matrix in V0 =========================== @@ -46,7 +46,7 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): # evaluation of weight function at quadrature points if weight == None: - mat_w = xp.ones(det_df.shape, dtype=float) + mat_w = np.ones(det_df.shape, dtype=float) else: mat_w = weight(pts[0].flatten(), pts[1].flatten(), 0.0) mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) @@ -55,14 +55,14 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = tensor_space_FEM.Nbase_0form Nj = tensor_space_FEM.Nbase_0form - M = xp.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) + M = np.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array([0, 0]), - xp.array([0, 0]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array([0, 0]), + np.array([0, 0]), wts[0], wts[1], basisN[0], @@ -76,9 +76,9 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # conversion to sparse matrix - indices = xp.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) + indices = np.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * indices[0] + indices[1]).flatten() @@ -156,7 +156,7 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = 
tensor_space_FEM.Nbase_1form[a] Nj = tensor_space_FEM.Nbase_1form[b] - M[a][b] = xp.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) + M[a][b] = np.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) # evaluate inverse metric tensor at quadrature points if weight == None: @@ -167,13 +167,13 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) # assemble block if weight is not zero - if xp.any(mat_w): + if np.any(mat_w): ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array(ns[a]), - xp.array(ns[b]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array(ns[a]), + np.array(ns[b]), wts[0], wts[1], basis[a][0], @@ -187,9 +187,9 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # convert to sparse matrix - indices = xp.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) + indices = np.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * indices[0] + indices[1]).flatten() @@ -272,7 +272,7 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = tensor_space_FEM.Nbase_2form[a] Nj = tensor_space_FEM.Nbase_2form[b] - M[a][b] = xp.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) + M[a][b] = np.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) # evaluate metric tensor at quadrature points if weight == None: @@ -283,13 +283,13 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) # assemble block if weight is not zero - if xp.any(mat_w): + if np.any(mat_w): ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array(ns[a]), - xp.array(ns[b]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array(ns[a]), + np.array(ns[b]), wts[0], wts[1], basis[a][0], @@ -303,9 +303,9 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # convert to sparse matrix - indices = xp.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) + indices = np.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * indices[0] + indices[1]).flatten() @@ -369,7 +369,7 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): # evaluation of weight function at quadrature points if weight == None: - mat_w = xp.ones(det_df.shape, dtype=float) + mat_w = np.ones(det_df.shape, dtype=float) else: mat_w = weight(pts[0].flatten(), pts[1].flatten(), 0.0) mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) @@ -378,14 +378,14 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = tensor_space_FEM.Nbase_3form Nj = tensor_space_FEM.Nbase_3form - M = xp.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) + M = np.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array([1, 1]), - xp.array([1, 1]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array([1, 1]), + np.array([1, 1]), wts[0], wts[1], basisD[0], @@ -399,9 +399,9 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # conversion to sparse matrix - indices = xp.indices((Ni[0], Ni[1], 2 * p[0] + 1, 
2 * p[1] + 1)) + indices = np.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * indices[0] + indices[1]).flatten() @@ -475,7 +475,7 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = tensor_space_FEM.Nbase_0form Nj = tensor_space_FEM.Nbase_0form - M[a][b] = xp.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) + M[a][b] = np.zeros((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1), dtype=float) # evaluate metric tensor at quadrature points if weight == None: @@ -486,13 +486,13 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1]) # assemble block if weight is not zero - if xp.any(mat_w): + if np.any(mat_w): ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array(ns[a]), - xp.array(ns[b]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array(ns[a]), + np.array(ns[b]), wts[0], wts[1], basis[a][0], @@ -506,9 +506,9 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # convert to sparse matrix - indices = xp.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) + indices = np.indices((Ni[0], Ni[1], 2 * p[0] + 1, 2 * p[1] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * indices[0] + indices[1]).flatten() diff --git a/src/struphy/eigenvalue_solvers/mass_matrices_3d.py b/src/struphy/eigenvalue_solvers/mass_matrices_3d.py index d3dc4cad2..ef6ee1e0c 100644 --- a/src/struphy/eigenvalue_solvers/mass_matrices_3d.py +++ b/src/struphy/eigenvalue_solvers/mass_matrices_3d.py @@ -2,10 +2,10 @@ # # Copyright 2020 Florian Holderied (florian.holderied@ipp.mpg.de) -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_3d as ker +from struphy.utils.arrays import xp as np # ================ mass matrix in V0 =========================== @@ -46,7 +46,7 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): # evaluation of weight function at quadrature points if weight == None: - mat_w = xp.ones(det_df.shape, dtype=float) + mat_w = np.ones(det_df.shape, dtype=float) else: mat_w = weight(pts[0].flatten(), pts[1].flatten(), pts[2].flatten()) mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2]) @@ -55,14 +55,14 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): Ni = tensor_space_FEM.Nbase_0form Nj = tensor_space_FEM.Nbase_0form - M = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) + M = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float) ker.kernel_mass( - xp.array(Nel), - xp.array(p), - xp.array(n_quad), - xp.array([0, 0, 0]), - xp.array([0, 0, 0]), + np.array(Nel), + np.array(p), + np.array(n_quad), + np.array([0, 0, 0]), + np.array([0, 0, 0]), wts[0], wts[1], wts[2], @@ -80,9 +80,9 @@ def get_M0(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None): ) # conversion to sparse matrix - indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) + indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1)) - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)] row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + 
indices[2]).flatten()
@@ -161,7 +161,7 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         Ni = tensor_space_FEM.Nbase_1form[a]
         Nj = tensor_space_FEM.Nbase_1form[b]

-        M[a][b] = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)
+        M[a][b] = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)

         # evaluate metric tensor at quadrature points
         if weight == None:
@@ -172,13 +172,13 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2])

         # assemble block if weight is not zero
-        if xp.any(mat_w):
+        if np.any(mat_w):
             ker.kernel_mass(
-                xp.array(Nel),
-                xp.array(p),
-                xp.array(n_quad),
-                xp.array(ns[a]),
-                xp.array(ns[b]),
+                np.array(Nel),
+                np.array(p),
+                np.array(n_quad),
+                np.array(ns[a]),
+                np.array(ns[b]),
                 wts[0],
                 wts[1],
                 wts[2],
@@ -196,9 +196,9 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             )

         # convert to sparse matrix
-        indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))
+        indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))

-        shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)]
+        shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)]

         row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten()
@@ -209,8 +209,7 @@ def get_M1(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3

         M[a][b] = spa.csr_matrix(
-            (M[a][b].flatten(), (row, col.flatten())),
-            shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]),
+            (M[a][b].flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2])
         )

         M[a][b].eliminate_zeros()
@@ -281,7 +280,7 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         Ni = tensor_space_FEM.Nbase_2form[a]
         Nj = tensor_space_FEM.Nbase_2form[b]

-        M[a][b] = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)
+        M[a][b] = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)

         # evaluate metric tensor at quadrature points
         if weight == None:
@@ -292,13 +291,13 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2])

         # assemble block if weight is not zero
-        if xp.any(mat_w):
+        if np.any(mat_w):
             ker.kernel_mass(
-                xp.array(Nel),
-                xp.array(p),
-                xp.array(n_quad),
-                xp.array(ns[a]),
-                xp.array(ns[b]),
+                np.array(Nel),
+                np.array(p),
+                np.array(n_quad),
+                np.array(ns[a]),
+                np.array(ns[b]),
                 wts[0],
                 wts[1],
                 wts[2],
@@ -316,9 +315,9 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             )

         # convert to sparse matrix
-        indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))
+        indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))

-        shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)]
+        shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)]

         row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten()
@@ -329,8 +328,7 @@ def get_M2(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3

         M[a][b] = spa.csr_matrix(
-            (M[a][b].flatten(), (row, col.flatten())),
-            shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]),
+            (M[a][b].flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2])
         )

         M[a][b].eliminate_zeros()
@@ -383,7 +381,7 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):

     # evaluation of weight function at quadrature points
     if weight == None:
-        mat_w = xp.ones(det_df.shape, dtype=float)
+        mat_w = np.ones(det_df.shape, dtype=float)
     else:
         mat_w = weight(pts[0].flatten(), pts[1].flatten(), pts[2].flatten())
         mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2])
@@ -392,14 +390,14 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
     Ni = tensor_space_FEM.Nbase_3form
     Nj = tensor_space_FEM.Nbase_3form

-    M = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)
+    M = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)

     ker.kernel_mass(
-        xp.array(Nel),
-        xp.array(p),
-        xp.array(n_quad),
-        xp.array([1, 1, 1]),
-        xp.array([1, 1, 1]),
+        np.array(Nel),
+        np.array(p),
+        np.array(n_quad),
+        np.array([1, 1, 1]),
+        np.array([1, 1, 1]),
         wts[0],
         wts[1],
         wts[2],
@@ -417,9 +415,9 @@ def get_M3(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
     )

     # conversion to sparse matrix
-    indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))
+    indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))

-    shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)]
+    shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)]

     row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten()
@@ -517,7 +515,7 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         Ni = tensor_space_FEM.Nbase_2form[a]
         Nj = tensor_space_FEM.Nbase_2form[b]

-        M[a][b] = xp.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)
+        M[a][b] = np.zeros((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1), dtype=float)

         # evaluate metric tensor at quadrature points
         if weight == None:
@@ -528,13 +526,13 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             mat_w = mat_w.reshape(Nel[0], n_quad[0], Nel[1], n_quad[1], Nel[2], n_quad[2])

         # assemble block if weight is not zero
-        if xp.any(mat_w):
+        if np.any(mat_w):
             ker.kernel_mass(
-                xp.array(Nel),
-                xp.array(p),
-                xp.array(n_quad),
-                xp.array(ns[a]),
-                xp.array(ns[b]),
+                np.array(Nel),
+                np.array(p),
+                np.array(n_quad),
+                np.array(ns[a]),
+                np.array(ns[b]),
                 wts[0],
                 wts[1],
                 wts[2],
@@ -552,9 +550,9 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
             )

         # convert to sparse matrix
-        indices = xp.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))
+        indices = np.indices((Ni[0], Ni[1], Ni[2], 2 * p[0] + 1, 2 * p[1] + 1, 2 * p[2] + 1))

-        shift = [xp.arange(Ni) - p for Ni, p in zip(Ni, p)]
+        shift = [np.arange(Ni) - p for Ni, p in zip(Ni, p)]

         row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten()
@@ -565,8 +563,7 @@ def get_Mv(tensor_space_FEM, domain, apply_boundary_ops=False, weight=None):
         col = Nj[1] * Nj[2] * col1 + Nj[2] * col2 + col3

         M[a][b] = spa.csr_matrix(
-            (M[a][b].flatten(), (row, col.flatten())),
-            shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2]),
+            (M[a][b].flatten(), (row, col.flatten())), shape=(Ni[0] * Ni[1] * Ni[2], Nj[0] * Nj[1] * Nj[2])
         )

         M[a][b].eliminate_zeros()
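Aside: the row/col construction in the hunks above linearises a 3D multi-index into a flat CSR row number in C order. A minimal standalone sketch of that mapping (the sizes below are made up for illustration and are not struphy data):

    import numpy as np

    Ni = (2, 3, 4)  # stand-in for Nbase_1form[a]
    indices = np.indices(Ni)
    row = Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]
    # identical to C-order raveling of the index grid
    assert np.array_equal(row.flatten(), np.arange(Ni[0] * Ni[1] * Ni[2]))
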
diff --git a/src/struphy/eigenvalue_solvers/mhd_axisymmetric_main.py b/src/struphy/eigenvalue_solvers/mhd_axisymmetric_main.py
index 04a194c7f..009190582 100644
--- a/src/struphy/eigenvalue_solvers/mhd_axisymmetric_main.py
+++ b/src/struphy/eigenvalue_solvers/mhd_axisymmetric_main.py
@@ -32,11 +32,11 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N
     import os
     import time

-    import cunumpy as xp
     import scipy.sparse as spa

     from struphy.eigenvalue_solvers.mhd_operators import MHDOperators
     from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space
+    from struphy.utils.arrays import xp as np

     print("\nStart of eigenspectrum calculation for toroidal mode number", n_tor)
     print("")
@@ -45,13 +45,13 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N

     # print grid info
     print("\nGrid parameters:")
-    print("number of elements :", num_params["Nel"])
-    print("spline degrees :", num_params["p"])
-    print("periodic bcs :", num_params["spl_kind"])
-    print("hom. Dirichlet bc :", num_params["bc"])
-    print("GL quad pts (L2) :", num_params["nq_el"])
-    print("GL quad pts (hist) :", num_params["nq_pr"])
-    print("polar Ck :", num_params["polar_ck"])
+    print(f"number of elements : {num_params['Nel']}")
+    print(f"spline degrees : {num_params['p']}")
+    print(f"periodic bcs : {num_params['spl_kind']}")
+    print(f"hom. Dirichlet bc : {num_params['bc']}")
+    print(f"GL quad pts (L2) : {num_params['nq_el']}")
+    print(f"GL quad pts (hist) : {num_params['nq_pr']}")
+    print(f"polar Ck : {num_params['polar_ck']}")
     print("")

     # extract numerical parameters
@@ -72,12 +72,7 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N

     # set up 2d tensor-product space
     space_2d = Tensor_spline_space(
-        [space_1d_1, space_1d_2],
-        polar_ck,
-        eq_mhd.domain.cx[:, :, 0],
-        eq_mhd.domain.cy[:, :, 0],
-        n_tor,
-        basis_tor,
+        [space_1d_1, space_1d_2], polar_ck, eq_mhd.domain.cx[:, :, 0], eq_mhd.domain.cy[:, :, 0], n_tor, basis_tor
     )

     # set up 2d projectors
@@ -146,14 +141,14 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N
         .dot(
             EF.T.dot(space_2d.C0.conjugate().T.dot(M2_0.dot(space_2d.C0.dot(EF))))
             + mhd_ops.MJ_mat.dot(space_2d.C0.dot(EF))
-            - space_2d.D0.conjugate().T.dot(M3_0.dot(L)),
+            - space_2d.D0.conjugate().T.dot(M3_0.dot(L))
         )
         .toarray()
     )

     print("Assembly of final system matrix done --> start of eigenvalue calculation")

-    omega2, U2_eig = xp.linalg.eig(MAT)
+    omega2, U2_eig = np.linalg.eig(MAT)

     print("Eigenstates calculated")

@@ -166,9 +161,8 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N
         else:
             n_tor_str = "+" + str(n_tor)

-        xp.save(
-            os.path.join(path_out, "spec_n_" + n_tor_str + ".npy"),
-            xp.vstack((omega2.reshape(1, omega2.size), U2_eig)),
+        np.save(
+            os.path.join(path_out, "spec_n_" + n_tor_str + ".npy"), np.vstack((omega2.reshape(1, omega2.size), U2_eig))
         )

     # or return eigenfrequencies, eigenvectors and system matrix
@@ -186,7 +180,7 @@ def solve_mhd_ev_problem_2d(num_params, eq_mhd, n_tor, basis_tor="i", path_out=N

     # parse arguments
     parser = argparse.ArgumentParser(
-        description="Computes the complete eigenspectrum for a given axisymmetric MHD equilibrium.",
+        description="Computes the complete eigenspectrum for a given axisymmetric MHD equilibrium."
     )

     parser.add_argument("n_tor", type=int, help="the toroidal mode number")
diff --git a/src/struphy/eigenvalue_solvers/mhd_axisymmetric_pproc.py b/src/struphy/eigenvalue_solvers/mhd_axisymmetric_pproc.py
index 1685147e1..dc6e53ddd 100644
--- a/src/struphy/eigenvalue_solvers/mhd_axisymmetric_pproc.py
+++ b/src/struphy/eigenvalue_solvers/mhd_axisymmetric_pproc.py
@@ -3,9 +3,10 @@ def main():
     import argparse
     import os

-    import cunumpy as xp
     import yaml

+    from struphy.utils.arrays import xp as np
+
     # parse arguments
     parser = argparse.ArgumentParser(description="Restrict a full .npy eigenspectrum to a range of eigenfrequencies.")

@@ -21,10 +22,7 @@ def main():
     )

     parser.add_argument(
-        "--input-abs",
-        type=str,
-        metavar="DIR",
-        help="directory with eigenspectrum (.npy) file, absolute path",
+        "--input-abs", type=str, metavar="DIR", help="directory with eigenspectrum (.npy) file, absolute path"
     )

     parser.add_argument("lower", type=float, help="lower range of squared eigenfrequency")
@@ -54,18 +52,18 @@ def main():

     spec_path = os.path.join(input_path, "spec_n_" + n_tor_str + ".npy")

-    omega2, U2_eig = xp.split(xp.load(spec_path), [1], axis=0)
+    omega2, U2_eig = np.split(np.load(spec_path), [1], axis=0)

     omega2 = omega2.flatten()

-    modes_ind = xp.where((xp.real(omega2) < args.upper) & (xp.real(omega2) > args.lower))[0]
+    modes_ind = np.where((np.real(omega2) < args.upper) & (np.real(omega2) > args.lower))[0]

     omega2 = omega2[modes_ind]
     U2_eig = U2_eig[:, modes_ind]

     # save restricted spectrum
-    xp.save(
+    np.save(
         os.path.join(input_path, "spec_" + str(args.lower) + "_" + str(args.upper) + "_n_" + n_tor_str + ".npy"),
-        xp.vstack((omega2.reshape(1, omega2.size), U2_eig)),
+        np.vstack((omega2.reshape(1, omega2.size), U2_eig)),
     )
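Aside: every file in this patch trades the hard dependency `import cunumpy as xp` for the central alias `from struphy.utils.arrays import xp as np`, so the array backend is chosen in one place. A minimal sketch of what such an arrays module could look like (an assumption for illustration only; the actual struphy.utils.arrays may differ):

    # hypothetical struphy/utils/arrays.py
    try:
        import cupy as xp  # GPU-capable backend, if installed
    except ImportError:
        import numpy as xp  # plain NumPy fallback

Call sites then bind the alias once and keep NumPy-style spellings such as np.empty and np.concatenate.
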
diff --git a/src/struphy/eigenvalue_solvers/mhd_operators.py b/src/struphy/eigenvalue_solvers/mhd_operators.py
index 6f7325c6b..b2cb669ae 100644
--- a/src/struphy/eigenvalue_solvers/mhd_operators.py
+++ b/src/struphy/eigenvalue_solvers/mhd_operators.py
@@ -3,11 +3,11 @@
 # Copyright 2021 Florian Holderied (florian.holderied@ipp.mpg.de)

-import cunumpy as xp
 import scipy.sparse as spa

 import struphy.eigenvalue_solvers.legacy.mass_matrices_3d_pre as mass_3d_pre
 from struphy.eigenvalue_solvers.mhd_operators_core import MHDOperatorsCore
+from struphy.utils.arrays import xp as np


 class MHDOperators:
@@ -402,7 +402,7 @@ def __EF(self, u):
             out1 = self.int_N3.dot(self.dofs_EF[0].dot(u1).T).T + self.int_N3.dot(self.dofs_EF[1].dot(u3).T).T
             out3 = self.his_N3.dot(self.dofs_EF[2].dot(u1).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             u1, u3 = self.core.space.reshape_pol_2(u)
@@ -410,7 +410,7 @@ def __EF(self, u):
             out1 = self.int_D3.dot(self.dofs_EF[0].dot(u1).T).T + self.int_N3.dot(self.dofs_EF[1].dot(u3).T).T
             out3 = self.his_D3.dot(self.dofs_EF[2].dot(u1).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_EF.dot(u)
@@ -434,7 +434,7 @@ def __EF_transposed(self, e):
             )
             out3 = self.int_N3.T.dot(self.dofs_EF[1].T.dot(e1).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             out1 = (
@@ -442,7 +442,7 @@ def __EF_transposed(self, e):
             )
             out3 = self.int_N3.T.dot(self.dofs_EF[1].T.dot(e1).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_EF.T.dot(e)
@@ -462,7 +462,7 @@ def __MF(self, u):
             out1 = self.his_N3.dot(self.dofs_MF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_MF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             u1, u3 = self.core.space.reshape_pol_2(u)
@@ -470,7 +470,7 @@ def __MF(self, u):
             out1 = self.his_D3.dot(self.dofs_MF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_MF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_MF.dot(u)
@@ -492,13 +492,13 @@ def __MF_transposed(self, f):
             out1 = self.his_N3.T.dot(self.dofs_MF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_MF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             out1 = self.his_D3.T.dot(self.dofs_MF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_MF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_MF.T.dot(f)
@@ -518,7 +518,7 @@ def __PF(self, u):
             out1 = self.his_N3.dot(self.dofs_PF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_PF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             u1, u3 = self.core.space.reshape_pol_2(u)
@@ -526,7 +526,7 @@ def __PF(self, u):
             out1 = self.his_D3.dot(self.dofs_PF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_PF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_PF.dot(u)
@@ -548,13 +548,13 @@ def __PF_transposed(self, f):
             out1 = self.his_N3.T.dot(self.dofs_PF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_PF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             out1 = self.his_D3.T.dot(self.dofs_PF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_PF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_PF.T.dot(f)
@@ -574,7 +574,7 @@ def __JF(self, u):
             out1 = self.his_N3.dot(self.dofs_JF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_JF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             u1, u3 = self.core.space.reshape_pol_2(u)
@@ -582,7 +582,7 @@ def __JF(self, u):
             out1 = self.his_D3.dot(self.dofs_JF[0].dot(u1).T).T
             out3 = self.int_N3.dot(self.dofs_JF[1].dot(u3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_JF.dot(u)
@@ -604,13 +604,13 @@ def __JF_transposed(self, f):
             out1 = self.his_N3.T.dot(self.dofs_JF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_JF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         elif self.core.basis_u == 2:
             out1 = self.his_D3.T.dot(self.dofs_JF[0].T.dot(f1).T).T
             out3 = self.int_N3.T.dot(self.dofs_JF[1].T.dot(f3).T).T

-            out = xp.concatenate((out1.flatten(), out3.flatten()))
+            out = np.concatenate((out1.flatten(), out3.flatten()))

         else:
             out = self.dofs_JF.T.dot(f)
@@ -658,13 +658,11 @@ def __Mn(self, u):
         if self.Mn_as_tensor:
             if self.core.basis_u == 0:
                 out = self.core.space.apply_Mv_ten(
-                    u,
-                    [[self.Mn_mat[0], self.core.space.M0_tor], [self.Mn_mat[1], self.core.space.M0_tor]],
+                    u, [[self.Mn_mat[0], self.core.space.M0_tor], [self.Mn_mat[1], self.core.space.M0_tor]]
                 )
             elif self.core.basis_u == 2:
                 out = self.core.space.apply_M2_ten(
-                    u,
-                    [[self.Mn_mat[0], self.core.space.M1_tor], [self.Mn_mat[1], self.core.space.M0_tor]],
+                    u, [[self.Mn_mat[0], self.core.space.M1_tor], [self.Mn_mat[1], self.core.space.M0_tor]]
                 )

         else:
@@ -680,16 +678,15 @@ def __MJ(self, b):
         if self.MJ_as_tensor:
             if self.core.basis_u == 0:
-                out = xp.zeros(self.core.space.Ev_0.shape[0], dtype=float)
+                out = np.zeros(self.core.space.Ev_0.shape[0], dtype=float)
             elif self.core.basis_u == 2:
                 out = self.core.space.apply_M2_ten(
-                    b,
-                    [[self.MJ_mat[0], self.core.space.M1_tor], [self.MJ_mat[1], self.core.space.M0_tor]],
+                    b, [[self.MJ_mat[0], self.core.space.M1_tor], [self.MJ_mat[1], self.core.space.M0_tor]]
                 )

         else:
             if self.core.basis_u == 0:
-                out = xp.zeros(self.core.space.Ev_0.shape[0], dtype=float)
+                out = np.zeros(self.core.space.Ev_0.shape[0], dtype=float)
             elif self.core.basis_u == 2:
                 out = self.MJ_mat.dot(b)
@@ -703,7 +700,7 @@ def __L(self, u):
         if self.core.basis_u == 0:
             out = -self.core.space.D0.dot(self.__PF(u)) - (self.gamma - 1) * self.__PR(
-                self.core.space.D0.dot(self.__JF(u)),
+                self.core.space.D0.dot(self.__JF(u))
             )
         elif self.core.basis_u == 2:
             out = -self.core.space.D0.dot(self.__PF(u)) - (self.gamma - 1) * self.__PR(self.core.space.D0.dot(u))
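Aside: the set_operators hunks that follow wrap the private matvec callbacks (__Mn, __MJ, __L, __S2, __S6) into matrix-free operators, so Krylov solvers never need the assembled matrices. A self-contained illustration of that scipy.sparse.linalg.LinearOperator pattern (toy 2x2 matrix, not struphy code):

    import numpy as np
    import scipy.sparse.linalg as sla

    A = np.array([[2.0, 1.0], [1.0, 3.0]])

    def matvec(v):
        # stands in for a composition like C0.T @ (M2_0 @ (C0 @ v))
        return A @ v

    op = sla.LinearOperator((2, 2), matvec=matvec)
    x, info = sla.cg(op, np.array([1.0, 2.0]))  # Krylov methods only call matvec
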
"dofs_PF") and hasattr(self, "dofs_PR"): self.L = spa.linalg.LinearOperator( - (self.core.space.E3_0.shape[0], self.core.space.E2_0.shape[0]), - matvec=self.__L, + (self.core.space.E3_0.shape[0], self.core.space.E2_0.shape[0]), matvec=self.__L ) if hasattr(self, "Mn_mat") and hasattr(self, "dofs_EF"): self.S2 = spa.linalg.LinearOperator( - (self.core.space.E2_0.shape[0], self.core.space.E2_0.shape[0]), - matvec=self.__S2, + (self.core.space.E2_0.shape[0], self.core.space.E2_0.shape[0]), matvec=self.__S2 ) if hasattr(self, "Mn_mat") and hasattr(self, "L"): self.S6 = spa.linalg.LinearOperator( - (self.core.space.E2_0.shape[0], self.core.space.E2_0.shape[0]), - matvec=self.__S6, + (self.core.space.E2_0.shape[0], self.core.space.E2_0.shape[0]), matvec=self.__S6 ) # ====================================== @@ -942,7 +929,7 @@ def guess_S2(self, u, b, kind): u_guess = u + self.dt_2 / 6 * (k1_u + 2 * k2_u + 2 * k3_u + k4_u) else: - u_guess = xp.copy(u) + u_guess = np.copy(u) return u_guess @@ -1037,7 +1024,7 @@ def set_preconditioner_S2(self, which, tol_inv=1e-15, drop_tol=1e-4, fill_fac=10 # assemble approximate S2 matrix S2_approx = Mn + self.dt_2**2 / 4 * EF_approx.T.dot( - self.core.space.C0.T.dot(M2_0.dot(self.core.space.C0.dot(EF_approx))), + self.core.space.C0.T.dot(M2_0.dot(self.core.space.C0.dot(EF_approx))) ) del Mn, EF_approx, M2_0 @@ -1136,7 +1123,7 @@ def set_preconditioner_S6(self, which, tol_inv=1e-15, drop_tol=1e-4, fill_fac=10 # assemble approximate L matrix if self.core.basis_u == 0: L_approx = -self.core.space.D0.dot(PF_approx) - (self.gamma - 1) * PR_approx.dot( - self.core.space.D0.dot(JF_approx), + self.core.space.D0.dot(JF_approx) ) del PF_approx, PR_approx diff --git a/src/struphy/eigenvalue_solvers/mhd_operators_core.py b/src/struphy/eigenvalue_solvers/mhd_operators_core.py index 61d534148..528ec2b78 100644 --- a/src/struphy/eigenvalue_solvers/mhd_operators_core.py +++ b/src/struphy/eigenvalue_solvers/mhd_operators_core.py @@ -3,12 +3,12 @@ # Copyright 2021 Florian Holderied (florian.holderied@ipp.mpg.de) -import cunumpy as xp import scipy.sparse as spa import struphy.eigenvalue_solvers.kernels_projectors_global_mhd as ker import struphy.eigenvalue_solvers.mass_matrices_2d as mass_2d import struphy.eigenvalue_solvers.mass_matrices_3d as mass_3d +from struphy.utils.arrays import xp as np class MHDOperatorsCore: @@ -58,11 +58,11 @@ def __init__(self, space, equilibrium, basis_u): self.subs_cum = [space.projectors.subs_cum for space in self.space.spaces] # get 1D indices of non-vanishing values of expressions dofs_0(N), dofs_0(D), dofs_1(N) and dofs_1(D) - self.dofs_0_N_i = [list(xp.nonzero(space.projectors.I.toarray())) for space in self.space.spaces] - self.dofs_1_D_i = [list(xp.nonzero(space.projectors.H.toarray())) for space in self.space.spaces] + self.dofs_0_N_i = [list(np.nonzero(space.projectors.I.toarray())) for space in self.space.spaces] + self.dofs_1_D_i = [list(np.nonzero(space.projectors.H.toarray())) for space in self.space.spaces] - self.dofs_0_D_i = [list(xp.nonzero(space.projectors.ID.toarray())) for space in self.space.spaces] - self.dofs_1_N_i = [list(xp.nonzero(space.projectors.HN.toarray())) for space in self.space.spaces] + self.dofs_0_D_i = [list(np.nonzero(space.projectors.ID.toarray())) for space in self.space.spaces] + self.dofs_1_N_i = [list(np.nonzero(space.projectors.HN.toarray())) for space in self.space.spaces] for i in range(self.space.dim): for j in range(2): @@ -116,9 +116,9 @@ def get_blocks_EF(self, pol=True): B2_3_pts = 
diff --git a/src/struphy/eigenvalue_solvers/mhd_operators_core.py b/src/struphy/eigenvalue_solvers/mhd_operators_core.py
index 61d534148..528ec2b78 100644
--- a/src/struphy/eigenvalue_solvers/mhd_operators_core.py
+++ b/src/struphy/eigenvalue_solvers/mhd_operators_core.py
@@ -3,12 +3,12 @@
 # Copyright 2021 Florian Holderied (florian.holderied@ipp.mpg.de)

-import cunumpy as xp
 import scipy.sparse as spa

 import struphy.eigenvalue_solvers.kernels_projectors_global_mhd as ker
 import struphy.eigenvalue_solvers.mass_matrices_2d as mass_2d
 import struphy.eigenvalue_solvers.mass_matrices_3d as mass_3d
+from struphy.utils.arrays import xp as np


 class MHDOperatorsCore:
@@ -58,11 +58,11 @@ def __init__(self, space, equilibrium, basis_u):
         self.subs_cum = [space.projectors.subs_cum for space in self.space.spaces]

         # get 1D indices of non-vanishing values of expressions dofs_0(N), dofs_0(D), dofs_1(N) and dofs_1(D)
-        self.dofs_0_N_i = [list(xp.nonzero(space.projectors.I.toarray())) for space in self.space.spaces]
-        self.dofs_1_D_i = [list(xp.nonzero(space.projectors.H.toarray())) for space in self.space.spaces]
+        self.dofs_0_N_i = [list(np.nonzero(space.projectors.I.toarray())) for space in self.space.spaces]
+        self.dofs_1_D_i = [list(np.nonzero(space.projectors.H.toarray())) for space in self.space.spaces]

-        self.dofs_0_D_i = [list(xp.nonzero(space.projectors.ID.toarray())) for space in self.space.spaces]
-        self.dofs_1_N_i = [list(xp.nonzero(space.projectors.HN.toarray())) for space in self.space.spaces]
+        self.dofs_0_D_i = [list(np.nonzero(space.projectors.ID.toarray())) for space in self.space.spaces]
+        self.dofs_1_N_i = [list(np.nonzero(space.projectors.HN.toarray())) for space in self.space.spaces]

         for i in range(self.space.dim):
             for j in range(2):
@@ -116,9 +116,9 @@ def get_blocks_EF(self, pol=True):
                 B2_3_pts = B2_3_pts.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_N_i[0][0],
@@ -130,8 +130,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[0],
                     self.basis_his_N[0],
                     self.basis_int_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_3_pts,
                     val,
                     row,
@@ -139,8 +139,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_12 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_0form // self.N3)
                 )
                 EF_12.eliminate_zeros()
                 # ----------------------------------------------------
@@ -151,9 +150,9 @@ def get_blocks_EF(self, pol=True):
                 B2_2_pts = B2_2_pts.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_N_i[0][0],
@@ -165,8 +164,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[0],
                     self.basis_his_N[0],
                     self.basis_int_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_2_pts,
                     val,
                     row,
@@ -174,8 +173,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_13 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_0form // self.N3)
                 )
                 EF_13.eliminate_zeros()
                 # ----------------------------------------------------
@@ -186,9 +184,9 @@ def get_blocks_EF(self, pol=True):
                 B2_3_pts = B2_3_pts.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_N_i[0][0],
@@ -200,8 +198,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[1],
                     self.basis_int_N[0],
                     self.basis_his_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_3_pts,
                     val,
                     row,
@@ -209,8 +207,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_21 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_0form // self.N3)
                 )
                 EF_21.eliminate_zeros()
                 # ----------------------------------------------------
@@ -221,9 +218,9 @@ def get_blocks_EF(self, pol=True):
                 B2_1_pts = B2_1_pts.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_N_i[0][0],
@@ -235,8 +232,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[1],
                     self.basis_int_N[0],
                     self.basis_his_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_1_pts,
                     val,
                     row,
@@ -244,8 +241,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_23 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_0form // self.N3)
                 )
                 EF_23.eliminate_zeros()
                 # ----------------------------------------------------
@@ -255,9 +251,9 @@ def get_blocks_EF(self, pol=True):
                 B2_2_pts = self.equilibrium.b2_2(self.eta_int[0], self.eta_int[1], 0.0)

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs0_2d(
                     self.dofs_0_N_i[0][0],
@@ -273,8 +269,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_31 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_0form // self.N3)
                 )
                 EF_31.eliminate_zeros()
                 # ----------------------------------------------------
@@ -284,9 +279,9 @@ def get_blocks_EF(self, pol=True):
                 B2_1_pts = self.equilibrium.b2_1(self.eta_int[0], self.eta_int[1], 0.0)

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs0_2d(
                     self.dofs_0_N_i[0][0],
@@ -302,8 +297,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_32 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_0form // self.N3)
                 )
                 EF_32.eliminate_zeros()
                 # ----------------------------------------------------
@@ -315,17 +309,14 @@ def get_blocks_EF(self, pol=True):
                 B2_3_pts = B2_3_pts.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs11(
@@ -341,8 +332,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_his_N[0],
                     self.basis_int_N[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_3_pts,
                     val,
                     row,
@@ -359,17 +350,14 @@ def get_blocks_EF(self, pol=True):
                 B2_2_pts = B2_2_pts.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs11(
@@ -385,8 +373,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_his_N[0],
                     self.basis_int_N[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_2_pts,
                     val,
                     row,
@@ -403,17 +391,14 @@ def get_blocks_EF(self, pol=True):
                 B2_3_pts = B2_3_pts.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs12(
@@ -429,8 +414,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_his_N[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_3_pts,
                     val,
                     row,
@@ -447,17 +432,14 @@ def get_blocks_EF(self, pol=True):
                 B2_1_pts = B2_1_pts.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs12(
@@ -473,8 +455,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_his_N[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_1_pts,
                     val,
                     row,
@@ -491,17 +473,14 @@ def get_blocks_EF(self, pol=True):
                 B2_2_pts = B2_2_pts.reshape(self.nint[0], self.nint[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )

                 ker.rhs13(
@@ -517,8 +496,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_int_N[1],
                     self.basis_his_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_2_pts,
                     val,
                     row,
@@ -535,17 +514,14 @@ def get_blocks_EF(self, pol=True):
                 B2_1_pts = B2_1_pts.reshape(self.nint[0], self.nint[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )

                 ker.rhs13(
@@ -561,8 +537,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_int_N[1],
                     self.basis_his_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_1_pts,
                     val,
                     row,
@@ -585,9 +561,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_D_i[0][0],
@@ -599,8 +575,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[0],
                     self.basis_his_D[0],
                     self.basis_int_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_3_pts / det_dF,
                     val,
                     row,
@@ -608,8 +584,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_12 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_2form[1] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_2form[1] // self.D3)
                 )
                 EF_12.eliminate_zeros()
                 # ----------------------------------------------------
@@ -624,9 +599,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_D_i[0][0],
@@ -638,8 +613,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[0],
                     self.basis_his_D[0],
                     self.basis_int_D[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_2_pts / det_dF,
                     val,
                     row,
@@ -647,8 +622,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_13 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_2form[2] // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[0] // self.N3, self.space.Ntot_2form[2] // self.N3)
                 )
                 EF_13.eliminate_zeros()
                 # ----------------------------------------------------
@@ -663,9 +637,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_N_i[0][0],
@@ -677,8 +651,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[1],
                     self.basis_int_N[0],
                     self.basis_his_D[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_3_pts / det_dF,
                     val,
                     row,
@@ -686,8 +660,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_21 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_2form[0] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_2form[0] // self.D3)
                 )
                 EF_21.eliminate_zeros()
                 # ----------------------------------------------------
@@ -702,9 +675,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_D_i[0][0],
@@ -716,8 +689,8 @@ def get_blocks_EF(self, pol=True):
                     self.wts[1],
                     self.basis_int_D[0],
                     self.basis_his_D[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_1_pts / det_dF,
                     val,
                     row,
@@ -725,8 +698,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_23 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_2form[2] // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[1] // self.N3, self.space.Ntot_2form[2] // self.N3)
                 )
                 EF_23.eliminate_zeros()
                 # ----------------------------------------------------
@@ -739,9 +711,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = abs(self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], 0.0))

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size, dtype=int)

                 ker.rhs0_2d(
                     self.dofs_0_N_i[0][0],
@@ -757,8 +729,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_31 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_2form[0] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_2form[0] // self.D3)
                 )
                 EF_31.eliminate_zeros()
                 # ----------------------------------------------------
@@ -771,9 +742,9 @@ def get_blocks_EF(self, pol=True):
                 det_dF = abs(self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], 0.0))

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs0_2d(
                     self.dofs_0_D_i[0][0],
@@ -789,8 +760,7 @@ def get_blocks_EF(self, pol=True):
                 )

                 EF_32 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_2form[1] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_1form[2] // self.D3, self.space.Ntot_2form[1] // self.D3)
                 )
                 EF_32.eliminate_zeros()
                 # ----------------------------------------------------
@@ -803,22 +773,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_int[1], self.eta_int[2]),
+                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_int[1], self.eta_int[2])
                 )
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=int
                 )

                 ker.rhs11(
@@ -834,8 +801,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_his_D[0],
                     self.basis_int_N[1],
                     self.basis_int_D[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_3_pts / det_dF,
                     val,
                     row,
@@ -853,22 +820,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_int[1], self.eta_int[2]),
+                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_int[1], self.eta_int[2])
                 )
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_D_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs11(
@@ -884,8 +848,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_his_D[0],
                     self.basis_int_D[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_2_pts / det_dF,
                     val,
                     row,
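Aside: the reshape calls in these hunks fold field values evaluated on flattened point sets back into a (histopolation interval, quadrature point, interpolation point, ...) layout before they are handed to the kernels. A small self-contained sketch of the same bookkeeping (the sizes are made up):

    import numpy as np

    nhis, nq, nint = 4, 3, 5
    eta_flat = np.linspace(0.0, 1.0, nhis * nq)       # like eta_his[0].flatten()
    vals = np.sin(eta_flat)[:, None] * np.ones(nint)  # like a field on a point grid
    vals = vals.reshape(nhis, nq, nint)               # same (intervals, quad pts, interp pts) layout as above
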
@@ -903,22 +867,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_his[1].flatten(), self.eta_int[2]),
+                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_his[1].flatten(), self.eta_int[2])
                 )
                 det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_D_i[2][0].size, dtype=int
                 )

                 ker.rhs12(
@@ -934,8 +895,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_his_D[1],
                     self.basis_int_D[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_3_pts / det_dF,
                     val,
                     row,
@@ -953,22 +914,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_his[1].flatten(), self.eta_int[2]),
+                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_his[1].flatten(), self.eta_int[2])
                 )
                 det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs12(
@@ -984,8 +942,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_D[0],
                     self.basis_his_D[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_1_pts / det_dF,
                     val,
                     row,
@@ -1003,22 +961,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], self.eta_his[2].flatten()),
+                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], self.eta_his[2].flatten())
                 )
                 det_dF = det_dF.reshape(self.nint[0], self.nint[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_0_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int
                 )

                 ker.rhs13(
@@ -1034,8 +989,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_N[0],
                     self.basis_int_D[1],
                     self.basis_his_D[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     -B2_2_pts / det_dF,
                     val,
                     row,
@@ -1053,22 +1008,19 @@ def get_blocks_EF(self, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], self.eta_his[2].flatten()),
+                    self.equilibrium.domain.jacobian_det(self.eta_int[0], self.eta_int[1], self.eta_his[2].flatten())
                 )
                 det_dF = det_dF.reshape(self.nint[0], self.nint[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int
                 )

                 ker.rhs13(
@@ -1084,8 +1036,8 @@ def get_blocks_EF(self, pol=True):
                     self.basis_int_D[0],
                     self.basis_int_N[1],
                     self.basis_his_D[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     B2_1_pts / det_dF,
                     val,
                     row,
@@ -1141,9 +1093,9 @@ def get_blocks_FL(self, which, pol=True):
                 EQ = EQ.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_N_i[0][0],
@@ -1155,8 +1107,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.wts[1],
                     self.basis_int_N[0],
                     self.basis_his_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1164,8 +1116,7 @@ def get_blocks_FL(self, which, pol=True):
                 )

                 F_11 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_2form[0] // self.D3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_2form[0] // self.D3, self.space.Ntot_0form // self.N3)
                 )
                 F_11.eliminate_zeros()
                 # ------------------------------------------------------------
@@ -1182,9 +1133,9 @@ def get_blocks_FL(self, which, pol=True):
                 EQ = EQ.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_N_i[0][0],
@@ -1196,8 +1147,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.wts[0],
                     self.basis_his_N[0],
                     self.basis_int_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1205,8 +1156,7 @@ def get_blocks_FL(self, which, pol=True):
                 )

                 F_22 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_2form[1] // self.D3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_2form[1] // self.D3, self.space.Ntot_0form // self.N3)
                 )
                 F_22.eliminate_zeros()
                 # ------------------------------------------------------------
@@ -1223,9 +1173,9 @@ def get_blocks_FL(self, which, pol=True):
                 EQ = EQ.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size, dtype=int)

                 ker.rhs2_2d(
                     self.dofs_1_N_i[0][0],
@@ -1240,8 +1190,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.wts[1],
                     self.basis_his_N[0],
                     self.basis_his_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1249,8 +1199,7 @@ def get_blocks_FL(self, which, pol=True):
                 )

                 F_33 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_2form[2] // self.N3, self.space.Ntot_0form // self.N3),
+                    (val, (row, col)), shape=(self.space.Ntot_2form[2] // self.N3, self.space.Ntot_0form // self.N3)
                 )
                 F_33.eliminate_zeros()
                 # ------------------------------------------------------------
@@ -1264,25 +1213,20 @@ def get_blocks_FL(self, which, pol=True):
                     EQ = self.equilibrium.p3(self.eta_int[0], self.eta_his[1].flatten(), self.eta_his[2].flatten())
                 else:
                     EQ = self.equilibrium.domain.jacobian_det(
-                        self.eta_int[0],
-                        self.eta_his[1].flatten(),
-                        self.eta_his[2].flatten(),
+                        self.eta_int[0], self.eta_his[1].flatten(), self.eta_his[2].flatten()
                     )
                 EQ = EQ.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_0_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )

                 ker.rhs21(
@@ -1301,8 +1245,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.basis_int_N[0],
                     self.basis_his_N[1],
                     self.basis_his_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1321,25 +1265,20 @@ def get_blocks_FL(self, which, pol=True):
                     EQ = self.equilibrium.p3(self.eta_his[0].flatten(), self.eta_int[1], self.eta_his[2].flatten())
                 else:
                     EQ = self.equilibrium.domain.jacobian_det(
-                        self.eta_his[0].flatten(),
-                        self.eta_int[1],
-                        self.eta_his[2].flatten(),
+                        self.eta_his[0].flatten(), self.eta_int[1], self.eta_his[2].flatten()
                     )
                 EQ = EQ.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nhis[2], self.nq[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_N_i[2][0].size, dtype=int
                 )

                 ker.rhs22(
@@ -1358,8 +1297,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.basis_his_N[0],
                     self.basis_int_N[1],
                     self.basis_his_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1378,25 +1317,20 @@ def get_blocks_FL(self, which, pol=True):
                     EQ = self.equilibrium.p3(self.eta_his[0].flatten(), self.eta_his[1].flatten(), self.eta_int[2])
                 else:
                     EQ = self.equilibrium.domain.jacobian_det(
-                        self.eta_his[0].flatten(),
-                        self.eta_his[1].flatten(),
-                        self.eta_int[2],
+                        self.eta_his[0].flatten(), self.eta_his[1].flatten(), self.eta_int[2]
                     )
                 EQ = EQ.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1], self.nint[2])

                 # assemble sparse matrix
-                val = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=float,
+                val = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float
                 )
-                row = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                row = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )
-                col = xp.empty(
-                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size,
-                    dtype=int,
+                col = np.empty(
+                    self.dofs_1_N_i[0][0].size * self.dofs_1_N_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int
                 )

                 ker.rhs23(
@@ -1415,8 +1349,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.basis_his_N[0],
                     self.basis_his_N[1],
                     self.basis_int_N[2],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ,
                     val,
                     row,
@@ -1443,9 +1377,9 @@ def get_blocks_FL(self, which, pol=True):
                 det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)

                 ker.rhs12_2d(
                     self.dofs_0_N_i[0][0],
@@ -1457,8 +1391,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.wts[1],
                     self.basis_int_N[0],
                     self.basis_his_D[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ / det_dF,
                     val,
                     row,
@@ -1466,8 +1400,7 @@ def get_blocks_FL(self, which, pol=True):
                 )

                 F_11 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_2form[0] // self.D3, self.space.Ntot_2form[0] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_2form[0] // self.D3, self.space.Ntot_2form[0] // self.D3)
                 )
                 F_11.eliminate_zeros()
                 # ------------------------------------------------------------
@@ -1486,9 +1419,9 @@ def get_blocks_FL(self, which, pol=True):
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size, dtype=int)

                 ker.rhs11_2d(
                     self.dofs_1_D_i[0][0],
@@ -1500,8 +1433,8 @@ def get_blocks_FL(self, which, pol=True):
                     self.wts[0],
                     self.basis_his_D[0],
                     self.basis_int_N[1],
-                    xp.array(self.space.NbaseN),
-                    xp.array(self.space.NbaseD),
+                    np.array(self.space.NbaseN),
+                    np.array(self.space.NbaseD),
                     EQ / det_dF,
                     val,
                     row,
@@ -1509,8 +1442,7 @@ def get_blocks_FL(self, which, pol=True):
                 )

                 F_22 = spa.csr_matrix(
-                    (val, (row, col)),
-                    shape=(self.space.Ntot_2form[1] // self.D3, self.space.Ntot_2form[1] // self.D3),
+                    (val, (row, col)), shape=(self.space.Ntot_2form[1] // self.D3, self.space.Ntot_2form[1] // self.D3)
                 )
                 F_22.eliminate_zeros()
                 # ------------------------------------------------------------
@@ -1526,14 +1458,14 @@ def get_blocks_FL(self, which, pol=True):

                 # evaluate Jacobian determinant at at interpolation and quadrature points
                 det_dF = abs(
-                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_his[1].flatten(), 0.0),
+                    self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_his[1].flatten(), 0.0)
                 )
                 det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1])

                 # assemble sparse matrix
-                val = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
-                row = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
-                col = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                val = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float)
+                row = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)
+                col = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int)

                 ker.rhs2_2d(
                     self.dofs_1_D_i[0][0],
@@
-1548,8 +1480,8 @@ def get_blocks_FL(self, which, pol=True): self.wts[1], self.basis_his_D[0], self.basis_his_D[1], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + np.array(self.space.NbaseD), EQ / det_dF, val, row, @@ -1557,8 +1489,7 @@ def get_blocks_FL(self, which, pol=True): ) F_33 = spa.csr_matrix( - (val, (row, col)), - shape=(self.space.Ntot_2form[2] // self.N3, self.space.Ntot_2form[2] // self.N3), + (val, (row, col)), shape=(self.space.Ntot_2form[2] // self.N3, self.space.Ntot_2form[2] // self.N3) ) F_33.eliminate_zeros() # ------------------------------------------------------------ @@ -1576,25 +1507,20 @@ def get_blocks_FL(self, which, pol=True): # evaluate Jacobian determinant at at interpolation and quadrature points det_dF = abs( self.equilibrium.domain.jacobian_det( - self.eta_int[0], - self.eta_his[1].flatten(), - self.eta_his[2].flatten(), - ), + self.eta_int[0], self.eta_his[1].flatten(), self.eta_his[2].flatten() + ) ) det_dF = det_dF.reshape(self.nint[0], self.nhis[1], self.nq[1], self.nhis[2], self.nq[2]) # assemble sparse matrix - val = xp.empty( - self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=float, + val = np.empty( + self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=float ) - row = xp.empty( - self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + row = np.empty( + self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) - col = xp.empty( - self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + col = np.empty( + self.dofs_0_N_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) ker.rhs21( @@ -1613,8 +1539,8 @@ def get_blocks_FL(self, which, pol=True): self.basis_int_N[0], self.basis_his_D[1], self.basis_his_D[2], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + np.array(self.space.NbaseD), EQ / det_dF, val, row, @@ -1637,25 +1563,20 @@ def get_blocks_FL(self, which, pol=True): # evaluate Jacobian determinant at at interpolation and quadrature points det_dF = abs( self.equilibrium.domain.jacobian_det( - self.eta_his[0].flatten(), - self.eta_int[1], - self.eta_his[2].flatten(), - ), + self.eta_his[0].flatten(), self.eta_int[1], self.eta_his[2].flatten() + ) ) det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nint[1], self.nhis[2], self.nq[2]) # assemble sparse matrix - val = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=float, + val = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=float ) - row = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + row = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) - col = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + col = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_0_N_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) ker.rhs22( @@ -1674,8 +1595,8 @@ def get_blocks_FL(self, which, pol=True): self.basis_his_D[0], self.basis_int_N[1], self.basis_his_D[2], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + 
np.array(self.space.NbaseD), EQ / det_dF, val, row, @@ -1698,25 +1619,20 @@ def get_blocks_FL(self, which, pol=True): # evaluate Jacobian determinant at at interpolation and quadrature points det_dF = abs( self.equilibrium.domain.jacobian_det( - self.eta_his[0].flatten(), - self.eta_his[1].flatten(), - self.eta_int[2], - ), + self.eta_his[0].flatten(), self.eta_his[1].flatten(), self.eta_int[2] + ) ) det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1], self.nint[2]) # assemble sparse matrix - val = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, - dtype=float, + val = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=float ) - row = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, - dtype=int, + row = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int ) - col = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, - dtype=int, + col = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_0_N_i[2][0].size, dtype=int ) ker.rhs23( @@ -1735,8 +1651,8 @@ def get_blocks_FL(self, which, pol=True): self.basis_his_D[0], self.basis_his_D[1], self.basis_int_N[2], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + np.array(self.space.NbaseD), EQ / det_dF, val, row, @@ -1775,14 +1691,14 @@ def get_blocks_PR(self, pol=True): # evaluate Jacobian determinant at at interpolation and quadrature points det_dF = abs( - self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_his[1].flatten(), 0.0), + self.equilibrium.domain.jacobian_det(self.eta_his[0].flatten(), self.eta_his[1].flatten(), 0.0) ) det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1]) # assemble sparse matrix - val = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float) - row = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int) - col = xp.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int) + val = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=float) + row = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int) + col = np.empty(self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size, dtype=int) ker.rhs2_2d( self.dofs_1_D_i[0][0], @@ -1797,8 +1713,8 @@ def get_blocks_PR(self, pol=True): self.wts[1], self.basis_his_D[0], self.basis_his_D[1], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + np.array(self.space.NbaseD), P3_pts / det_dF, val, row, @@ -1806,8 +1722,7 @@ def get_blocks_PR(self, pol=True): ) PR = spa.csr_matrix( - (val, (row, col)), - shape=(self.space.Ntot_3form // self.D3, self.space.Ntot_3form // self.D3), + (val, (row, col)), shape=(self.space.Ntot_3form // self.D3, self.space.Ntot_3form // self.D3) ) PR.eliminate_zeros() # ----------------------------------------------------- @@ -1816,9 +1731,7 @@ def get_blocks_PR(self, pol=True): # --------------- ([his, his, his] of DDD) ------------ # evaluate equilibrium pressure at quadrature points P3_pts = self.equilibrium.p3( - self.eta_his[0].flatten(), - self.eta_his[1].flatten(), - self.eta_his[2].flatten(), + self.eta_his[0].flatten(), self.eta_his[1].flatten(), self.eta_his[2].flatten() ) P3_pts = P3_pts.reshape(self.nhis[0], self.nq[0], 
self.nhis[1], self.nq[1], self.nhis[2], self.nq[2]) @@ -1826,25 +1739,20 @@ def get_blocks_PR(self, pol=True): # evaluate Jacobian determinant at at interpolation and quadrature points det_dF = abs( self.equilibrium.domain.jacobian_det( - self.eta_his[0].flatten(), - self.eta_his[1].flatten(), - self.eta_his[2].flatten(), - ), + self.eta_his[0].flatten(), self.eta_his[1].flatten(), self.eta_his[2].flatten() + ) ) det_dF = det_dF.reshape(self.nhis[0], self.nq[0], self.nhis[1], self.nq[1], self.nhis[2], self.nq[2]) # assemble sparse matrix - val = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=float, + val = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=float ) - row = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + row = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) - col = xp.empty( - self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, - dtype=int, + col = np.empty( + self.dofs_1_D_i[0][0].size * self.dofs_1_D_i[1][0].size * self.dofs_1_D_i[2][0].size, dtype=int ) ker.rhs3( @@ -1866,8 +1774,8 @@ def get_blocks_PR(self, pol=True): self.basis_his_D[0], self.basis_his_D[1], self.basis_his_D[2], - xp.array(self.space.NbaseN), - xp.array(self.space.NbaseD), + np.array(self.space.NbaseN), + np.array(self.space.NbaseD), P3_pts / det_dF, val, row, diff --git a/src/struphy/eigenvalue_solvers/projectors_global.py b/src/struphy/eigenvalue_solvers/projectors_global.py index 9d246cdac..208ff701a 100644 --- a/src/struphy/eigenvalue_solvers/projectors_global.py +++ b/src/struphy/eigenvalue_solvers/projectors_global.py @@ -6,11 +6,11 @@ Classes for commuting projectors in 1D, 2D and 3D based on global spline interpolation and histopolation. 
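The F_11/F_22/F_33 and PR blocks assembled in the hunks above all follow one idiom: preallocate flat val/row/col triplet buffers sized by the non-zero DOF index sets, let a compiled kernel (ker.rhs11_2d, ker.rhs21, ...) fill them, then convert to CSR and prune stored zeros. A minimal self-contained sketch of that idiom, with plain numpy standing in for the backend alias np and a hypothetical random fill standing in for the kernels:

import numpy as np
import scipy.sparse as spa

# preallocated COO triplets, as in the val/row/col buffers above
nnz, n_rows, n_cols = 24, 8, 8
val = np.empty(nnz, dtype=float)
row = np.empty(nnz, dtype=int)
col = np.empty(nnz, dtype=int)

# hypothetical stand-in for a ker.rhs* call filling the buffers
rng = np.random.default_rng(0)
val[:] = rng.standard_normal(nnz)
val[np.abs(val) < 0.5] = 0.0                 # some entries end up exactly zero
row[:] = rng.integers(0, n_rows, nnz)
col[:] = rng.integers(0, n_cols, nnz)

# duplicates in (row, col) are summed on construction; stored zeros are then dropped
F = spa.csr_matrix((val, (row, col)), shape=(n_rows, n_cols))
F.eliminate_zeros()

The summing of duplicate (row, col) entries is what lets the kernels wrap periodic indices (the j % nbD seen above) without any bookkeeping on their side.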
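projectors_global.py below builds the commuting projectors from interpolation at Greville points (0-space) and histopolation, i.e. cell integrals between consecutive Greville points (1-space). A minimal 1d sketch, under the assumption that the struphy.bsplines.bsplines helpers behave as used elsewhere in this diff; the real Projectors_global_1d additionally handles quadrature sub-intervals and periodic even-degree corrections:

import numpy as np
import struphy.bsplines.bsplines as bsp

p, Nel, periodic = 3, 8, False
breaks = np.linspace(0.0, 1.0, Nel + 1)
T = bsp.make_knots(breaks, p, periodic)           # knot vector of the 0-space
xg = bsp.greville(T, p, periodic)                 # interpolation points

C = bsp.collocation_matrix(T, p, xg, periodic)    # square: N_j evaluated at xg_i
H = bsp.histopolation_matrix(T, p, xg, periodic)  # square: D_j integrated per cell

f = lambda x: np.sin(2 * np.pi * x)
F = lambda x: -np.cos(2 * np.pi * x) / (2 * np.pi)   # antiderivative of f

c0 = np.linalg.solve(C, f(xg))                    # PI_0: match point values
c1 = np.linalg.solve(H, F(xg[1:]) - F(xg[:-1]))   # PI_1: match cell integrals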
""" -import cunumpy as xp import scipy.sparse as spa import struphy.bsplines.bsplines as bsp from struphy.linear_algebra.linalg_kron import kron_lusolve_2d, kron_lusolve_3d, kron_matvec_2d, kron_matvec_3d +from struphy.utils.arrays import xp as np # ======================= 1d ==================================== @@ -156,20 +156,20 @@ def __init__(self, spline_space, n_quad=6): self.n_quad = n_quad # Gauss - Legendre quadrature points and weights in (-1, 1) - self.pts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[0] - self.wts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[1] + self.pts_loc = np.polynomial.legendre.leggauss(self.n_quad)[0] + self.wts_loc = np.polynomial.legendre.leggauss(self.n_quad)[1] # set interpolation points (Greville points) self.x_int = spline_space.greville.copy() # set number of sub-intervals per integration interval between Greville points and integration boundaries - self.subs = xp.ones(spline_space.NbaseD, dtype=int) - self.x_his = xp.array([self.x_int[0]]) + self.subs = np.ones(spline_space.NbaseD, dtype=int) + self.x_his = np.array([self.x_int[0]]) for i in range(spline_space.NbaseD): for br in spline_space.el_b: # left and right integration boundaries - if not spline_space.spl_kind: + if spline_space.spl_kind == False: xl = self.x_int[i] xr = self.x_int[i + 1] else: @@ -181,16 +181,16 @@ def __init__(self, spline_space, n_quad=6): # compute subs and x_his if (br > xl + 1e-10) and (br < xr - 1e-10): self.subs[i] += 1 - self.x_his = xp.append(self.x_his, br) + self.x_his = np.append(self.x_his, br) elif br >= xr - 1e-10: - self.x_his = xp.append(self.x_his, xr) + self.x_his = np.append(self.x_his, xr) break - if spline_space.spl_kind and spline_space.p % 2 == 0: - self.x_his = xp.append(self.x_his, spline_space.el_b[-1] + self.x_his[0]) + if spline_space.spl_kind == True and spline_space.p % 2 == 0: + self.x_his = np.append(self.x_his, spline_space.el_b[-1] + self.x_his[0]) # cumulative number of sub-intervals for conversion local interval --> global interval - self.subs_cum = xp.append(0, xp.cumsum(self.subs - 1)[:-1]) + self.subs_cum = np.append(0, np.cumsum(self.subs - 1)[:-1]) # quadrature points and weights self.pts, self.wts = bsp.quadrature_grid(self.x_his, self.pts_loc, self.wts_loc) @@ -198,33 +198,33 @@ def __init__(self, spline_space, n_quad=6): # quadrature points and weights, ignoring subs (less accurate integration for even degree) self.x_hisG = self.x_int - if spline_space.spl_kind: + if spline_space.spl_kind == True: if spline_space.p % 2 == 0: - self.x_hisG = xp.append(self.x_hisG, spline_space.el_b[-1] + self.x_hisG[0]) + self.x_hisG = np.append(self.x_hisG, spline_space.el_b[-1] + self.x_hisG[0]) else: - self.x_hisG = xp.append(self.x_hisG, spline_space.el_b[-1]) + self.x_hisG = np.append(self.x_hisG, spline_space.el_b[-1]) self.ptsG, self.wtsG = bsp.quadrature_grid(self.x_hisG, self.pts_loc, self.wts_loc) self.ptsG = self.ptsG % spline_space.el_b[-1] # Knot span indices at interpolation points in format (greville, 0) - self.span_x_int_N = xp.zeros(self.x_int[:, None].shape, dtype=int) - self.span_x_int_D = xp.zeros(self.x_int[:, None].shape, dtype=int) + self.span_x_int_N = np.zeros(self.x_int[:, None].shape, dtype=int) + self.span_x_int_D = np.zeros(self.x_int[:, None].shape, dtype=int) for i in range(self.x_int.shape[0]): self.span_x_int_N[i, 0] = bsp.find_span(self.space.T, self.space.p, self.x_int[i]) self.span_x_int_D[i, 0] = bsp.find_span(self.space.t, self.space.p - 1, self.x_int[i]) # Knot span indices at quadrature points 
between x_int in format (i, iq) - self.span_ptsG_N = xp.zeros(self.ptsG.shape, dtype=int) - self.span_ptsG_D = xp.zeros(self.ptsG.shape, dtype=int) + self.span_ptsG_N = np.zeros(self.ptsG.shape, dtype=int) + self.span_ptsG_D = np.zeros(self.ptsG.shape, dtype=int) for i in range(self.ptsG.shape[0]): for iq in range(self.ptsG.shape[1]): self.span_ptsG_N[i, iq] = bsp.find_span(self.space.T, self.space.p, self.ptsG[i, iq]) self.span_ptsG_D[i, iq] = bsp.find_span(self.space.t, self.space.p - 1, self.ptsG[i, iq]) # Values of p + 1 non-zero basis functions at Greville points in format (greville, 0, basis function) - self.basis_x_int_N = xp.zeros((*self.x_int[:, None].shape, self.space.p + 1), dtype=float) - self.basis_x_int_D = xp.zeros((*self.x_int[:, None].shape, self.space.p), dtype=float) + self.basis_x_int_N = np.zeros((*self.x_int[:, None].shape, self.space.p + 1), dtype=float) + self.basis_x_int_D = np.zeros((*self.x_int[:, None].shape, self.space.p), dtype=float) N_temp = bsp.basis_ders_on_quad_grid(self.space.T, self.space.p, self.x_int[:, None], 0, normalize=False) D_temp = bsp.basis_ders_on_quad_grid(self.space.t, self.space.p - 1, self.x_int[:, None], 0, normalize=True) @@ -236,8 +236,8 @@ def __init__(self, spline_space, n_quad=6): self.basis_x_int_D[i, 0, b] = D_temp[i, b, 0, 0] # Values of p + 1 non-zero basis functions at quadrature points points between x_int in format (i, iq, basis function) - self.basis_ptsG_N = xp.zeros((*self.ptsG.shape, self.space.p + 1), dtype=float) - self.basis_ptsG_D = xp.zeros((*self.ptsG.shape, self.space.p), dtype=float) + self.basis_ptsG_N = np.zeros((*self.ptsG.shape, self.space.p + 1), dtype=float) + self.basis_ptsG_D = np.zeros((*self.ptsG.shape, self.space.p), dtype=float) N_temp = bsp.basis_ders_on_quad_grid(self.space.T, self.space.p, self.ptsG, 0, normalize=False) D_temp = bsp.basis_ders_on_quad_grid(self.space.t, self.space.p - 1, self.ptsG, 0, normalize=True) @@ -250,7 +250,7 @@ def __init__(self, spline_space, n_quad=6): self.basis_ptsG_D[i, iq, b] = D_temp[i, b, 0, iq] # quadrature matrix for performing integrations as matrix-vector products - self.Q = xp.zeros((spline_space.NbaseD, self.wts.shape[0] * self.n_quad), dtype=float) + self.Q = np.zeros((spline_space.NbaseD, self.wts.shape[0] * self.n_quad), dtype=float) for i in range(spline_space.NbaseD): for j in range(self.subs[i]): @@ -260,7 +260,7 @@ def __init__(self, spline_space, n_quad=6): self.Q = spa.csr_matrix(self.Q) # quadrature matrix for performing integrations as matrix-vector products, ignoring subs (less accurate integration for even degree) - self.QG = xp.zeros((spline_space.NbaseD, self.wtsG.shape[0] * self.n_quad), dtype=float) + self.QG = np.zeros((spline_space.NbaseD, self.wtsG.shape[0] * self.n_quad), dtype=float) for i in range(spline_space.NbaseD): self.QG[i, self.n_quad * i : self.n_quad * (i + 1)] = self.wtsG[i] @@ -271,18 +271,10 @@ def __init__(self, spline_space, n_quad=6): BM_splines = [False, True] self.N_int = bsp.collocation_matrix( - spline_space.T, - spline_space.p - 0, - self.x_int, - spline_space.spl_kind, - BM_splines[0], + spline_space.T, spline_space.p - 0, self.x_int, spline_space.spl_kind, BM_splines[0] ) self.D_int = bsp.collocation_matrix( - spline_space.t, - spline_space.p - 1, - self.x_int, - spline_space.spl_kind, - BM_splines[1], + spline_space.t, spline_space.p - 1, self.x_int, spline_space.spl_kind, BM_splines[1] ) self.N_int[self.N_int < 1e-12] = 0.0 @@ -292,18 +284,10 @@ def __init__(self, spline_space, n_quad=6): self.D_int = 
spa.csr_matrix(self.D_int) self.N_pts = bsp.collocation_matrix( - spline_space.T, - spline_space.p - 0, - self.pts.flatten(), - spline_space.spl_kind, - BM_splines[0], + spline_space.T, spline_space.p - 0, self.pts.flatten(), spline_space.spl_kind, BM_splines[0] ) self.D_pts = bsp.collocation_matrix( - spline_space.t, - spline_space.p - 1, - self.pts.flatten(), - spline_space.spl_kind, - BM_splines[1], + spline_space.t, spline_space.p - 1, self.pts.flatten(), spline_space.spl_kind, BM_splines[1] ) self.N_pts = spa.csr_matrix(self.N_pts) @@ -415,17 +399,17 @@ def dofs_1d_bases_products(self, space): dofs_1_i(D_j*D_k). """ - dofs_0_NN = xp.empty((space.NbaseN, space.NbaseN, space.NbaseN), dtype=float) - dofs_0_DN = xp.empty((space.NbaseN, space.NbaseD, space.NbaseN), dtype=float) - dofs_0_DD = xp.empty((space.NbaseN, space.NbaseD, space.NbaseD), dtype=float) + dofs_0_NN = np.empty((space.NbaseN, space.NbaseN, space.NbaseN), dtype=float) + dofs_0_DN = np.empty((space.NbaseN, space.NbaseD, space.NbaseN), dtype=float) + dofs_0_DD = np.empty((space.NbaseN, space.NbaseD, space.NbaseD), dtype=float) - dofs_1_NN = xp.empty((space.NbaseD, space.NbaseN, space.NbaseN), dtype=float) - dofs_1_DN = xp.empty((space.NbaseD, space.NbaseD, space.NbaseN), dtype=float) - dofs_1_DD = xp.empty((space.NbaseD, space.NbaseD, space.NbaseD), dtype=float) + dofs_1_NN = np.empty((space.NbaseD, space.NbaseN, space.NbaseN), dtype=float) + dofs_1_DN = np.empty((space.NbaseD, space.NbaseD, space.NbaseN), dtype=float) + dofs_1_DD = np.empty((space.NbaseD, space.NbaseD, space.NbaseD), dtype=float) # ========= dofs_0_NN and dofs_1_NN ============== - cj = xp.zeros(space.NbaseN, dtype=float) - ck = xp.zeros(space.NbaseN, dtype=float) + cj = np.zeros(space.NbaseN, dtype=float) + ck = np.zeros(space.NbaseN, dtype=float) for j in range(space.NbaseN): for k in range(space.NbaseN): @@ -442,8 +426,8 @@ def N_jN_k(eta): dofs_1_NN[:, j, k] = self.dofs_1(N_jN_k) # ========= dofs_0_DN and dofs_1_DN ============== - cj = xp.zeros(space.NbaseD, dtype=float) - ck = xp.zeros(space.NbaseN, dtype=float) + cj = np.zeros(space.NbaseD, dtype=float) + ck = np.zeros(space.NbaseN, dtype=float) for j in range(space.NbaseD): for k in range(space.NbaseN): @@ -460,8 +444,8 @@ def D_jN_k(eta): dofs_1_DN[:, j, k] = self.dofs_1(D_jN_k) # ========= dofs_0_DD and dofs_1_DD ============= - cj = xp.zeros(space.NbaseD, dtype=float) - ck = xp.zeros(space.NbaseD, dtype=float) + cj = np.zeros(space.NbaseD, dtype=float) + ck = np.zeros(space.NbaseD, dtype=float) for j in range(space.NbaseD): for k in range(space.NbaseD): @@ -477,110 +461,110 @@ def D_jD_k(eta): dofs_0_DD[:, j, k] = self.dofs_0(D_jD_k) dofs_1_DD[:, j, k] = self.dofs_1(D_jD_k) - dofs_0_ND = xp.transpose(dofs_0_DN, (0, 2, 1)) - dofs_1_ND = xp.transpose(dofs_1_DN, (0, 2, 1)) + dofs_0_ND = np.transpose(dofs_0_DN, (0, 2, 1)) + dofs_1_ND = np.transpose(dofs_1_DN, (0, 2, 1)) # find non-zero entries - dofs_0_NN_indices = xp.nonzero(dofs_0_NN) - dofs_0_DN_indices = xp.nonzero(dofs_0_DN) - dofs_0_ND_indices = xp.nonzero(dofs_0_ND) - dofs_0_DD_indices = xp.nonzero(dofs_0_DD) - - dofs_1_NN_indices = xp.nonzero(dofs_1_NN) - dofs_1_DN_indices = xp.nonzero(dofs_1_DN) - dofs_1_ND_indices = xp.nonzero(dofs_1_ND) - dofs_1_DD_indices = xp.nonzero(dofs_1_DD) - - dofs_0_NN_i_red = xp.empty(dofs_0_NN_indices[0].size, dtype=int) - dofs_0_DN_i_red = xp.empty(dofs_0_DN_indices[0].size, dtype=int) - dofs_0_ND_i_red = xp.empty(dofs_0_ND_indices[0].size, dtype=int) - dofs_0_DD_i_red = xp.empty(dofs_0_DD_indices[0].size, 
dtype=int) - - dofs_1_NN_i_red = xp.empty(dofs_1_NN_indices[0].size, dtype=int) - dofs_1_DN_i_red = xp.empty(dofs_1_DN_indices[0].size, dtype=int) - dofs_1_ND_i_red = xp.empty(dofs_1_ND_indices[0].size, dtype=int) - dofs_1_DD_i_red = xp.empty(dofs_1_DD_indices[0].size, dtype=int) + dofs_0_NN_indices = np.nonzero(dofs_0_NN) + dofs_0_DN_indices = np.nonzero(dofs_0_DN) + dofs_0_ND_indices = np.nonzero(dofs_0_ND) + dofs_0_DD_indices = np.nonzero(dofs_0_DD) + + dofs_1_NN_indices = np.nonzero(dofs_1_NN) + dofs_1_DN_indices = np.nonzero(dofs_1_DN) + dofs_1_ND_indices = np.nonzero(dofs_1_ND) + dofs_1_DD_indices = np.nonzero(dofs_1_DD) + + dofs_0_NN_i_red = np.empty(dofs_0_NN_indices[0].size, dtype=int) + dofs_0_DN_i_red = np.empty(dofs_0_DN_indices[0].size, dtype=int) + dofs_0_ND_i_red = np.empty(dofs_0_ND_indices[0].size, dtype=int) + dofs_0_DD_i_red = np.empty(dofs_0_DD_indices[0].size, dtype=int) + + dofs_1_NN_i_red = np.empty(dofs_1_NN_indices[0].size, dtype=int) + dofs_1_DN_i_red = np.empty(dofs_1_DN_indices[0].size, dtype=int) + dofs_1_ND_i_red = np.empty(dofs_1_ND_indices[0].size, dtype=int) + dofs_1_DD_i_red = np.empty(dofs_1_DD_indices[0].size, dtype=int) # ================================ nv = space.NbaseN * dofs_0_NN_indices[1] + dofs_0_NN_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_0_NN_indices[0].size): - dofs_0_NN_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_0_NN_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseN * dofs_0_DN_indices[1] + dofs_0_DN_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_0_DN_indices[0].size): - dofs_0_DN_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_0_DN_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseD * dofs_0_ND_indices[1] + dofs_0_ND_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_0_ND_indices[0].size): - dofs_0_ND_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_0_ND_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseD * dofs_0_DD_indices[1] + dofs_0_DD_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_0_DD_indices[0].size): - dofs_0_DD_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_0_DD_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseN * dofs_1_NN_indices[1] + dofs_1_NN_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_1_NN_indices[0].size): - dofs_1_NN_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_1_NN_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseN * dofs_1_DN_indices[1] + dofs_1_DN_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_1_DN_indices[0].size): - dofs_1_DN_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_1_DN_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseD * dofs_1_ND_indices[1] + dofs_1_ND_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_1_ND_indices[0].size): - dofs_1_ND_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_1_ND_i_red[i] = np.nonzero(un == nv[i])[0] # ================================ nv = space.NbaseD * dofs_1_DD_indices[1] + dofs_1_DD_indices[2] - un = xp.unique(nv) + un = np.unique(nv) for i in range(dofs_1_DD_indices[0].size): - dofs_1_DD_i_red[i] = xp.nonzero(un == nv[i])[0] + dofs_1_DD_i_red[i] = np.nonzero(un == nv[i])[0] - dofs_0_NN_indices = xp.vstack( - (dofs_0_NN_indices[0], dofs_0_NN_indices[1], dofs_0_NN_indices[2], 
dofs_0_NN_i_red), + dofs_0_NN_indices = np.vstack( + (dofs_0_NN_indices[0], dofs_0_NN_indices[1], dofs_0_NN_indices[2], dofs_0_NN_i_red) ) - dofs_0_DN_indices = xp.vstack( - (dofs_0_DN_indices[0], dofs_0_DN_indices[1], dofs_0_DN_indices[2], dofs_0_DN_i_red), + dofs_0_DN_indices = np.vstack( + (dofs_0_DN_indices[0], dofs_0_DN_indices[1], dofs_0_DN_indices[2], dofs_0_DN_i_red) ) - dofs_0_ND_indices = xp.vstack( - (dofs_0_ND_indices[0], dofs_0_ND_indices[1], dofs_0_ND_indices[2], dofs_0_ND_i_red), + dofs_0_ND_indices = np.vstack( + (dofs_0_ND_indices[0], dofs_0_ND_indices[1], dofs_0_ND_indices[2], dofs_0_ND_i_red) ) - dofs_0_DD_indices = xp.vstack( - (dofs_0_DD_indices[0], dofs_0_DD_indices[1], dofs_0_DD_indices[2], dofs_0_DD_i_red), + dofs_0_DD_indices = np.vstack( + (dofs_0_DD_indices[0], dofs_0_DD_indices[1], dofs_0_DD_indices[2], dofs_0_DD_i_red) ) - dofs_1_NN_indices = xp.vstack( - (dofs_1_NN_indices[0], dofs_1_NN_indices[1], dofs_1_NN_indices[2], dofs_1_NN_i_red), + dofs_1_NN_indices = np.vstack( + (dofs_1_NN_indices[0], dofs_1_NN_indices[1], dofs_1_NN_indices[2], dofs_1_NN_i_red) ) - dofs_1_DN_indices = xp.vstack( - (dofs_1_DN_indices[0], dofs_1_DN_indices[1], dofs_1_DN_indices[2], dofs_1_DN_i_red), + dofs_1_DN_indices = np.vstack( + (dofs_1_DN_indices[0], dofs_1_DN_indices[1], dofs_1_DN_indices[2], dofs_1_DN_i_red) ) - dofs_1_ND_indices = xp.vstack( - (dofs_1_ND_indices[0], dofs_1_ND_indices[1], dofs_1_ND_indices[2], dofs_1_ND_i_red), + dofs_1_ND_indices = np.vstack( + (dofs_1_ND_indices[0], dofs_1_ND_indices[1], dofs_1_ND_indices[2], dofs_1_ND_i_red) ) - dofs_1_DD_indices = xp.vstack( - (dofs_1_DD_indices[0], dofs_1_DD_indices[1], dofs_1_DD_indices[2], dofs_1_DD_i_red), + dofs_1_DD_indices = np.vstack( + (dofs_1_DD_indices[0], dofs_1_DD_indices[1], dofs_1_DD_indices[2], dofs_1_DD_i_red) ) return ( @@ -658,8 +642,8 @@ def eval_for_PI(self, comp, fun): pts_PI = self.pts_PI[comp] - pts1, pts2 = xp.meshgrid(pts_PI[0], pts_PI[1], indexing="ij") - # pts1, pts2 = xp.meshgrid(pts_PI[0], pts_PI[1], indexing='ij', sparse=True) # numpy >1.7 + pts1, pts2 = np.meshgrid(pts_PI[0], pts_PI[1], indexing="ij") + # pts1, pts2 = np.meshgrid(pts_PI[0], pts_PI[1], indexing='ij', sparse=True) # numpy >1.7 return fun(pts1, pts2) @@ -922,8 +906,8 @@ def eval_for_PI(self, comp, fun): pts_PI = self.pts_PI[comp] - pts1, pts2, pts3 = xp.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing="ij") - # pts1, pts2, pts3 = xp.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing='ij', sparse=True) # numpy >1.7 + pts1, pts2, pts3 = np.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing="ij") + # pts1, pts2, pts3 = np.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing='ij', sparse=True) # numpy >1.7 return fun(pts1, pts2, pts3) @@ -955,25 +939,25 @@ def eval_for_PI(self, comp, fun): # rhs = mat_f # # elif comp=='11': - # rhs = xp.empty( (self.d1, self.n2, self.n3) ) + # rhs = np.empty( (self.d1, self.n2, self.n3) ) # # ker_glob.kernel_int_3d_eta1(self.subs1, self.subs_cum1, self.wts1, # mat_f.reshape(self.ne1, self.nq1, self.n2, self.n3), rhs # ) # elif comp=='12': - # rhs = xp.empty( (self.n1, self.d2, self.n3) ) + # rhs = np.empty( (self.n1, self.d2, self.n3) ) # # ker_glob.kernel_int_3d_eta2(self.subs2, self.subs_cum2, self.wts2, # mat_f.reshape(self.n1, self.ne2, self.nq2, self.n3), rhs # ) # elif comp=='13': - # rhs = xp.empty( (self.n1, self.n2, self.d3) ) + # rhs = np.empty( (self.n1, self.n2, self.d3) ) # # ker_glob.kernel_int_3d_eta3(self.subs3, self.subs_cum3, self.wts3, # mat_f.reshape(self.n1, self.n2, self.ne3, 
self.nq3), rhs # ) # elif comp=='21': - # rhs = xp.empty( (self.n1, self.d2, self.d3) ) + # rhs = np.empty( (self.n1, self.d2, self.d3) ) # # ker_glob.kernel_int_3d_eta2_eta3(self.subs2, self.subs3, # self.subs_cum2, self.subs_cum3, @@ -981,7 +965,7 @@ def eval_for_PI(self, comp, fun): # mat_f.reshape(self.n1, self.ne2, self.nq2, self.ne3, self.nq3), rhs # ) # elif comp=='22': - # rhs = xp.empty( (self.d1, self.n2, self.d3) ) + # rhs = np.empty( (self.d1, self.n2, self.d3) ) # # ker_glob.kernel_int_3d_eta1_eta3(self.subs1, self.subs3, # self.subs_cum1, self.subs_cum3, @@ -989,7 +973,7 @@ def eval_for_PI(self, comp, fun): # mat_f.reshape(self.ne1, self.nq1, self.n2, self.ne3, self.nq3), rhs # ) # elif comp=='23': - # rhs = xp.empty( (self.d1, self.d2, self.n3) ) + # rhs = np.empty( (self.d1, self.d2, self.n3) ) # # ker_glob.kernel_int_3d_eta1_eta2(self.subs1, self.subs2, # self.subs_cum1, self.subs_cum2, @@ -997,7 +981,7 @@ def eval_for_PI(self, comp, fun): # mat_f.reshape(self.ne1, self.nq1, self.ne2, self.nq2, self.n3), rhs # ) # elif comp=='3': - # rhs = xp.empty( (self.d1, self.d2, self.d3) ) + # rhs = np.empty( (self.d1, self.d2, self.d3) ) # # ker_glob.kernel_int_3d_eta1_eta2_eta3(self.subs1, self.subs2, self.subs3, # self.subs_cum1, self.subs_cum2, self.subs_cum3, @@ -1041,7 +1025,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='11': # assert mat_dofs.shape == (self.d1, self.n2, self.n3) - # rhs = xp.empty( (self.ne1, self.nq1, self.n2, self.n3) ) + # rhs = np.empty( (self.ne1, self.nq1, self.n2, self.n3) ) # # ker_glob.kernel_int_3d_eta1_transpose(self.subs1, self.subs_cum1, self.wts1, # mat_dofs, rhs) @@ -1050,7 +1034,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='12': # assert mat_dofs.shape == (self.n1, self.d2, self.n3) - # rhs = xp.empty( (self.n1, self.ne2, self.nq2, self.n3) ) + # rhs = np.empty( (self.n1, self.ne2, self.nq2, self.n3) ) # # ker_glob.kernel_int_3d_eta2_transpose(self.subs2, self.subs_cum2, self.wts2, # mat_dofs, rhs) @@ -1059,7 +1043,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='13': # assert mat_dofs.shape == (self.n1, self.n2, self.d3) - # rhs = xp.empty( (self.n1, self.n2, self.ne3, self.nq3) ) + # rhs = np.empty( (self.n1, self.n2, self.ne3, self.nq3) ) # # ker_glob.kernel_int_3d_eta3_transpose(self.subs3, self.subs_cum3, self.wts3, # mat_dofs, rhs) @@ -1068,7 +1052,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='21': # assert mat_dofs.shape == (self.n1, self.d2, self.d3) - # rhs = xp.empty( (self.n1, self.ne2, self.nq2, self.ne3, self.nq3) ) + # rhs = np.empty( (self.n1, self.ne2, self.nq2, self.ne3, self.nq3) ) # # ker_glob.kernel_int_3d_eta2_eta3_transpose(self.subs2, self.subs3, # self.subs_cum2, self.subs_cum3, @@ -1078,7 +1062,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='22': # assert mat_dofs.shape == (self.d1, self.n2, self.d3) - # rhs = xp.empty( (self.ne1, self.nq1, self.n2, self.ne3, self.nq3) ) + # rhs = np.empty( (self.ne1, self.nq1, self.n2, self.ne3, self.nq3) ) # # ker_glob.kernel_int_3d_eta1_eta3_transpose(self.subs1, self.subs3, # self.subs_cum1, self.subs_cum3, @@ -1088,7 +1072,7 @@ def eval_for_PI(self, comp, fun): # # elif comp=='23': # assert mat_dofs.shape == (self.d1, self.d2, self.n3) - # rhs = xp.empty( (self.ne1, self.nq1, self.ne2, self.nq2, self.n3) ) + # rhs = np.empty( (self.ne1, self.nq1, self.ne2, self.nq2, self.n3) ) # # ker_glob.kernel_int_3d_eta1_eta2_transpose(self.subs1, self.subs2, # self.subs_cum1, self.subs_cum2, @@ -1098,7 +1082,7 @@ def eval_for_PI(self, comp, fun): # # elif 
comp=='3': # assert mat_dofs.shape == (self.d1, self.d2, self.d3) - # rhs = xp.empty( (self.ne1, self.nq1, self.ne2, self.nq2, self.ne3, self.nq3) ) + # rhs = np.empty( (self.ne1, self.nq1, self.ne2, self.nq2, self.ne3, self.nq3) ) # # ker_glob.kernel_int_3d_eta1_eta2_eta3_transpose(self.subs1, self.subs2, self.subs3, # self.subs_cum1, self.subs_cum2, self.subs_cum3, @@ -1135,8 +1119,7 @@ def dofs(self, comp, mat_f): if comp == "0": dofs = kron_matvec_3d( - [spa.identity(mat_f.shape[0]), spa.identity(mat_f.shape[1]), spa.identity(mat_f.shape[2])], - mat_f, + [spa.identity(mat_f.shape[0]), spa.identity(mat_f.shape[1]), spa.identity(mat_f.shape[2])], mat_f ) elif comp == "11": @@ -1189,18 +1172,15 @@ def dofs_T(self, comp, mat_dofs): elif comp == "11": rhs = kron_matvec_3d( - [self.Q1.T, spa.identity(mat_dofs.shape[1]), spa.identity(mat_dofs.shape[2])], - mat_dofs, + [self.Q1.T, spa.identity(mat_dofs.shape[1]), spa.identity(mat_dofs.shape[2])], mat_dofs ) elif comp == "12": rhs = kron_matvec_3d( - [spa.identity(mat_dofs.shape[0]), self.Q2.T, spa.identity(mat_dofs.shape[2])], - mat_dofs, + [spa.identity(mat_dofs.shape[0]), self.Q2.T, spa.identity(mat_dofs.shape[2])], mat_dofs ) elif comp == "13": rhs = kron_matvec_3d( - [spa.identity(mat_dofs.shape[0]), spa.identity(mat_dofs.shape[1]), self.Q3.T], - mat_dofs, + [spa.identity(mat_dofs.shape[0]), spa.identity(mat_dofs.shape[1]), self.Q3.T], mat_dofs ) elif comp == "21": @@ -1615,26 +1595,26 @@ def __init__(self, tensor_space): else: if tensor_space.n_tor == 0: - x_i3 = xp.array([0.0]) - x_q3 = xp.array([0.0]) - x_q3G = xp.array([0.0]) + x_i3 = np.array([0.0]) + x_q3 = np.array([0.0]) + x_q3G = np.array([0.0]) else: if tensor_space.basis_tor == "r": if tensor_space.n_tor > 0: - x_i3 = xp.array([1.0, 0.25 / tensor_space.n_tor]) - x_q3 = xp.array([1.0, 0.25 / tensor_space.n_tor]) - x_q3G = xp.array([1.0, 0.25 / tensor_space.n_tor]) + x_i3 = np.array([1.0, 0.25 / tensor_space.n_tor]) + x_q3 = np.array([1.0, 0.25 / tensor_space.n_tor]) + x_q3G = np.array([1.0, 0.25 / tensor_space.n_tor]) else: - x_i3 = xp.array([1.0, 0.75 / (-tensor_space.n_tor)]) - x_q3 = xp.array([1.0, 0.75 / (-tensor_space.n_tor)]) - x_q3G = xp.array([1.0, 0.75 / (-tensor_space.n_tor)]) + x_i3 = np.array([1.0, 0.75 / (-tensor_space.n_tor)]) + x_q3 = np.array([1.0, 0.75 / (-tensor_space.n_tor)]) + x_q3G = np.array([1.0, 0.75 / (-tensor_space.n_tor)]) else: - x_i3 = xp.array([0.0]) - x_q3 = xp.array([0.0]) - x_q3G = xp.array([0.0]) + x_i3 = np.array([0.0]) + x_q3 = np.array([0.0]) + x_q3G = np.array([0.0]) self.Q3 = spa.identity(tensor_space.NbaseN[2], format="csr") self.Q3G = spa.identity(tensor_space.NbaseN[2], format="csr") @@ -1776,11 +1756,11 @@ def eval_for_PI(self, comp, fun, eval_kind, with_subs=True): pts_PI = self.getpts_for_PI(comp, with_subs) # array of evaluated function - mat_f = xp.empty((pts_PI[0].size, pts_PI[1].size, pts_PI[2].size), dtype=float) + mat_f = np.empty((pts_PI[0].size, pts_PI[1].size, pts_PI[2].size), dtype=float) # create a meshgrid and evaluate function on point set if eval_kind == "meshgrid": - pts1, pts2, pts3 = xp.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing="ij") + pts1, pts2, pts3 = np.meshgrid(pts_PI[0], pts_PI[1], pts_PI[2], indexing="ij") mat_f[:, :, :] = fun(pts1, pts2, pts3) # tensor-product evaluation is done by input function @@ -1803,13 +1783,13 @@ def eval_for_PI(self, comp, fun, eval_kind, with_subs=True): # n2 = self.pts_PI_0[1].size # # # apply (I0_22) to each column - # self.S0 = xp.zeros(((n1 - 2)*n2, 3), dtype=float) + # 
self.S0 = np.zeros(((n1 - 2)*n2, 3), dtype=float) # # for i in range(3): # self.S0[:, i] = kron_lusolve_2d(self.I0_22_LUs, self.I0_21[:, i].toarray().reshape(n1 - 2, n2)).flatten() # # # 3 x 3 matrix - # self.S0 = xp.linalg.inv(self.I0_11.toarray() - self.I0_12.toarray().dot(self.S0)) + # self.S0 = np.linalg.inv(self.I0_11.toarray() - self.I0_12.toarray().dot(self.S0)) # # # ====================================== @@ -1834,7 +1814,7 @@ def eval_for_PI(self, comp, fun, eval_kind, with_subs=True): # # solve for tensor-product coefficients # out2 = out2 - kron_lusolve_2d(self.I0_22_LUs, self.I0_21.dot(self.S0.dot(rhs1)).reshape(n1 - 2, n2)) + kron_lusolve_2d(self.I0_22_LUs, self.I0_21.dot(self.S0.dot(self.I0_12.dot(out2.flatten()))).reshape(n1 - 2, n2)) # - # return xp.concatenate((out1, out2.flatten())) + # return np.concatenate((out1, out2.flatten())) # ====================================== @@ -1856,12 +1836,10 @@ def solve_V1(self, dofs_1, include_bc): # with boundary splines if include_bc: dofs_11 = dofs_1[: self.P1_pol.shape[0] * self.I_tor.shape[0]].reshape( - self.P1_pol.shape[0], - self.I_tor.shape[0], + self.P1_pol.shape[0], self.I_tor.shape[0] ) dofs_12 = dofs_1[self.P1_pol.shape[0] * self.I_tor.shape[0] :].reshape( - self.P0_pol.shape[0], - self.H_tor.shape[0], + self.P0_pol.shape[0], self.H_tor.shape[0] ) coeffs1 = self.I_tor_LU.solve(self.I1_pol_LU.solve(dofs_11).T).T @@ -1870,30 +1848,26 @@ def solve_V1(self, dofs_1, include_bc): # without boundary splines else: dofs_11 = dofs_1[: self.P1_pol_0.shape[0] * self.I0_tor.shape[0]].reshape( - self.P1_pol_0.shape[0], - self.I0_tor.shape[0], + self.P1_pol_0.shape[0], self.I0_tor.shape[0] ) dofs_12 = dofs_1[self.P1_pol_0.shape[0] * self.I0_tor.shape[0] :].reshape( - self.P0_pol_0.shape[0], - self.H0_tor.shape[0], + self.P0_pol_0.shape[0], self.H0_tor.shape[0] ) coeffs1 = self.I0_tor_LU.solve(self.I1_pol_0_LU.solve(dofs_11).T).T coeffs2 = self.H0_tor_LU.solve(self.I0_pol_0_LU.solve(dofs_12).T).T - return xp.concatenate((coeffs1.flatten(), coeffs2.flatten())) + return np.concatenate((coeffs1.flatten(), coeffs2.flatten())) # ====================================== def solve_V2(self, dofs_2, include_bc): # with boundary splines if include_bc: dofs_21 = dofs_2[: self.P2_pol.shape[0] * self.H_tor.shape[0]].reshape( - self.P2_pol.shape[0], - self.H_tor.shape[0], + self.P2_pol.shape[0], self.H_tor.shape[0] ) dofs_22 = dofs_2[self.P2_pol.shape[0] * self.H_tor.shape[0] :].reshape( - self.P3_pol.shape[0], - self.I_tor.shape[0], + self.P3_pol.shape[0], self.I_tor.shape[0] ) coeffs1 = self.H_tor_LU.solve(self.I2_pol_LU.solve(dofs_21).T).T @@ -1902,18 +1876,16 @@ def solve_V2(self, dofs_2, include_bc): # without boundary splines else: dofs_21 = dofs_2[: self.P2_pol_0.shape[0] * self.H0_tor.shape[0]].reshape( - self.P2_pol_0.shape[0], - self.H0_tor.shape[0], + self.P2_pol_0.shape[0], self.H0_tor.shape[0] ) dofs_22 = dofs_2[self.P2_pol_0.shape[0] * self.H0_tor.shape[0] :].reshape( - self.P3_pol_0.shape[0], - self.I0_tor.shape[0], + self.P3_pol_0.shape[0], self.I0_tor.shape[0] ) coeffs1 = self.H0_tor_LU.solve(self.I2_pol_0_LU.solve(dofs_21).T).T coeffs2 = self.I0_tor_LU.solve(self.I3_pol_0_LU.solve(dofs_22).T).T - return xp.concatenate((coeffs1.flatten(), coeffs2.flatten())) + return np.concatenate((coeffs1.flatten(), coeffs2.flatten())) # ====================================== def solve_V3(self, dofs_3, include_bc): @@ -1966,18 +1938,16 @@ def apply_IinvT_V1(self, rhs, include_bc=False): # without boundary splines else: rhs1 = rhs[: 
self.P1_pol_0.shape[0] * self.I0_tor.shape[0]].reshape( - self.P1_pol_0.shape[0], - self.I0_tor.shape[0], + self.P1_pol_0.shape[0], self.I0_tor.shape[0] ) rhs2 = rhs[self.P1_pol_0.shape[0] * self.I0_tor.shape[0] :].reshape( - self.P0_pol_0.shape[0], - self.H0_tor.shape[0], + self.P0_pol_0.shape[0], self.H0_tor.shape[0] ) rhs1 = self.I1_pol_0_T_LU.solve(self.I0_tor_T_LU.solve(rhs1.T).T) rhs2 = self.I0_pol_0_T_LU.solve(self.H0_tor_T_LU.solve(rhs2.T).T) - return xp.concatenate((rhs1.flatten(), rhs2.flatten())) + return np.concatenate((rhs1.flatten(), rhs2.flatten())) # ====================================== def apply_IinvT_V2(self, rhs, include_bc=False): @@ -1998,18 +1968,16 @@ def apply_IinvT_V2(self, rhs, include_bc=False): # without boundary splines else: rhs1 = rhs[: self.P2_pol_0.shape[0] * self.H0_tor.shape[0]].reshape( - self.P2_pol_0.shape[0], - self.H0_tor.shape[0], + self.P2_pol_0.shape[0], self.H0_tor.shape[0] ) rhs2 = rhs[self.P2_pol_0.shape[0] * self.H0_tor.shape[0] :].reshape( - self.P3_pol_0.shape[0], - self.I0_tor.shape[0], + self.P3_pol_0.shape[0], self.I0_tor.shape[0] ) rhs1 = self.I2_pol_0_T_LU.solve(self.H0_tor_T_LU.solve(rhs1.T).T) rhs2 = self.I3_pol_0_T_LU.solve(self.I0_tor_T_LU.solve(rhs2.T).T) - return xp.concatenate((rhs1.flatten(), rhs2.flatten())) + return np.concatenate((rhs1.flatten(), rhs2.flatten())) # ====================================== def apply_IinvT_V3(self, rhs, include_bc=False): @@ -2036,8 +2004,7 @@ def dofs_0(self, fun, include_bc=True, eval_kind="meshgrid"): # get dofs on tensor-product grid dofs = kron_matvec_3d( - [spa.identity(dofs.shape[0]), spa.identity(dofs.shape[1]), spa.identity(dofs.shape[2])], - dofs, + [spa.identity(dofs.shape[0]), spa.identity(dofs.shape[1]), spa.identity(dofs.shape[2])], dofs ) # apply extraction operator for dofs @@ -2075,9 +2042,9 @@ def dofs_1(self, fun, include_bc=True, eval_kind="meshgrid", with_subs=True): # apply extraction operator for dofs if include_bc: - dofs = self.P1.dot(xp.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) + dofs = self.P1.dot(np.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) else: - dofs = self.P1_0.dot(xp.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) + dofs = self.P1_0.dot(np.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) return dofs @@ -2108,9 +2075,9 @@ def dofs_2(self, fun, include_bc=True, eval_kind="meshgrid", with_subs=True): # apply extraction operator for dofs if include_bc: - dofs = self.P2.dot(xp.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) + dofs = self.P2.dot(np.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) else: - dofs = self.P2_0.dot(xp.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) + dofs = self.P2_0.dot(np.concatenate((dofs_1.flatten(), dofs_2.flatten(), dofs_3.flatten()))) return dofs @@ -2153,20 +2120,20 @@ def pi_3(self, fun, include_bc=True, eval_kind="meshgrid", with_subs=True): # ======================================== def assemble_approx_inv(self, tol): - if not self.approx_Ik_0_inv or (self.approx_Ik_0_inv and self.approx_Ik_0_tol != tol): + if self.approx_Ik_0_inv == False or (self.approx_Ik_0_inv == True and self.approx_Ik_0_tol != tol): # poloidal plane - I0_pol_0_inv_approx = xp.linalg.inv(self.I0_pol_0.toarray()) - I1_pol_0_inv_approx = xp.linalg.inv(self.I1_pol_0.toarray()) - I2_pol_0_inv_approx = xp.linalg.inv(self.I2_pol_0.toarray()) - I3_pol_0_inv_approx = xp.linalg.inv(self.I3_pol_0.toarray()) - 
I0_pol_inv_approx = xp.linalg.inv(self.I0_pol.toarray()) + I0_pol_0_inv_approx = np.linalg.inv(self.I0_pol_0.toarray()) + I1_pol_0_inv_approx = np.linalg.inv(self.I1_pol_0.toarray()) + I2_pol_0_inv_approx = np.linalg.inv(self.I2_pol_0.toarray()) + I3_pol_0_inv_approx = np.linalg.inv(self.I3_pol_0.toarray()) + I0_pol_inv_approx = np.linalg.inv(self.I0_pol.toarray()) if tol > 1e-14: - I0_pol_0_inv_approx[xp.abs(I0_pol_0_inv_approx) < tol] = 0.0 - I1_pol_0_inv_approx[xp.abs(I1_pol_0_inv_approx) < tol] = 0.0 - I2_pol_0_inv_approx[xp.abs(I2_pol_0_inv_approx) < tol] = 0.0 - I3_pol_0_inv_approx[xp.abs(I3_pol_0_inv_approx) < tol] = 0.0 - I0_pol_inv_approx[xp.abs(I0_pol_inv_approx) < tol] = 0.0 + I0_pol_0_inv_approx[np.abs(I0_pol_0_inv_approx) < tol] = 0.0 + I1_pol_0_inv_approx[np.abs(I1_pol_0_inv_approx) < tol] = 0.0 + I2_pol_0_inv_approx[np.abs(I2_pol_0_inv_approx) < tol] = 0.0 + I3_pol_0_inv_approx[np.abs(I3_pol_0_inv_approx) < tol] = 0.0 + I0_pol_inv_approx[np.abs(I0_pol_inv_approx) < tol] = 0.0 I0_pol_0_inv_approx = spa.csr_matrix(I0_pol_0_inv_approx) I1_pol_0_inv_approx = spa.csr_matrix(I1_pol_0_inv_approx) @@ -2175,12 +2142,12 @@ def assemble_approx_inv(self, tol): I0_pol_inv_approx = spa.csr_matrix(I0_pol_inv_approx) # toroidal direction - I_inv_tor_approx = xp.linalg.inv(self.I_tor.toarray()) - H_inv_tor_approx = xp.linalg.inv(self.H_tor.toarray()) + I_inv_tor_approx = np.linalg.inv(self.I_tor.toarray()) + H_inv_tor_approx = np.linalg.inv(self.H_tor.toarray()) if tol > 1e-14: - I_inv_tor_approx[xp.abs(I_inv_tor_approx) < tol] = 0.0 - H_inv_tor_approx[xp.abs(H_inv_tor_approx) < tol] = 0.0 + I_inv_tor_approx[np.abs(I_inv_tor_approx) < tol] = 0.0 + H_inv_tor_approx[np.abs(H_inv_tor_approx) < tol] = 0.0 I_inv_tor_approx = spa.csr_matrix(I_inv_tor_approx) H_inv_tor_approx = spa.csr_matrix(H_inv_tor_approx) diff --git a/src/struphy/eigenvalue_solvers/spline_space.py b/src/struphy/eigenvalue_solvers/spline_space.py index c10124e57..cffc1902d 100644 --- a/src/struphy/eigenvalue_solvers/spline_space.py +++ b/src/struphy/eigenvalue_solvers/spline_space.py @@ -6,10 +6,11 @@ Basic modules to create tensor-product finite element spaces of univariate B-splines. """ -import cunumpy as xp import matplotlib import scipy.sparse as spa +from struphy.utils.arrays import xp as np + matplotlib.rcParams.update({"font.size": 16}) import matplotlib.pyplot as plt @@ -49,19 +50,19 @@ class Spline_space_1d: Attributes ---------- - el_b : xp.array + el_b : np.array Element boundaries, equally spaced. delta : float Uniform grid spacing - T : xp.array + T : np.array Knot vector of 0-space. - t : xp.arrray + t : np.arrray Knot vector of 1-space. - greville : xp.array + greville : np.array Greville points. NbaseN : int @@ -70,22 +71,22 @@ class Spline_space_1d: NbaseD : int Dimension of 1-space. - indN : xp.array + indN : np.array Global indices of non-vanishing B-splines in each element in format (element, local basis function) - indD : xp.array + indD : np.array Global indices of non-vanishing M-splines in each element in format (element, local basis function) - pts : xp.array + pts : np.array Global GL quadrature points in format (element, local point). - wts : xp.array + wts : np.array Global GL quadrature weights in format (element, local point). 
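The pts/wts pairs above come from bsp.quadrature_grid, which (as used throughout this diff) maps a Gauss-Legendre rule from (-1, 1) onto every element. A standalone sketch assuming the standard affine map; the shift and scale factors are the only moving parts:

import numpy as np

def quadrature_grid(el_b, pts_loc, wts_loc):
    # map nodes/weights from (-1, 1) to each cell [el_b[i], el_b[i+1]]
    a, b = el_b[:-1, None], el_b[1:, None]
    pts = 0.5 * (b - a) * (pts_loc[None, :] + 1.0) + a
    wts = 0.5 * (b - a) * wts_loc[None, :]
    return pts, wts                                  # format (element, local point)

el_b = np.linspace(0.0, 1.0, 9)
pts_loc, wts_loc = np.polynomial.legendre.leggauss(6)
pts, wts = quadrature_grid(el_b, pts_loc, wts_loc)
assert np.isclose((wts * pts**2).sum(), 1.0 / 3.0)   # integrates x**2 exactly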
- basisN : xp.array + basisN : np.array N-basis functions evaluated at quadrature points in format (element, local basis function, derivative, local point) - basisD : xp.array + basisD : np.array D-basis functions evaluated at quadrature points in format (element, local basis function, derivative, local point) E0 : csr_matrix @@ -139,7 +140,7 @@ def __init__(self, Nel, p, spl_kind, n_quad=6, bc=["f", "f"]): else: self.bc = bc - self.el_b = xp.linspace(0.0, 1.0, Nel + 1) # element boundaries + self.el_b = np.linspace(0.0, 1.0, Nel + 1) # element boundaries self.delta = 1 / self.Nel # element length self.T = bsp.make_knots(self.el_b, self.p, self.spl_kind) # spline knot vector for B-splines (N) @@ -151,13 +152,13 @@ def __init__(self, Nel, p, spl_kind, n_quad=6, bc=["f", "f"]): self.NbaseD = self.NbaseN - 1 + self.spl_kind # total number of M-splines (D) # global indices of non-vanishing splines in each element in format (Nel, p + 1) - self.indN = (xp.indices((self.Nel, self.p + 1 - 0))[1] + xp.arange(self.Nel)[:, None]) % self.NbaseN - self.indD = (xp.indices((self.Nel, self.p + 1 - 1))[1] + xp.arange(self.Nel)[:, None]) % self.NbaseD + self.indN = (np.indices((self.Nel, self.p + 1 - 0))[1] + np.arange(self.Nel)[:, None]) % self.NbaseN + self.indD = (np.indices((self.Nel, self.p + 1 - 1))[1] + np.arange(self.Nel)[:, None]) % self.NbaseD self.n_quad = n_quad # number of Gauss-Legendre points per grid cell (defined by break points) - self.pts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[0] # Gauss-Legendre points (GLQP) in (-1, 1) - self.wts_loc = xp.polynomial.legendre.leggauss(self.n_quad)[1] # Gauss-Legendre weights (GLQW) in (-1, 1) + self.pts_loc = np.polynomial.legendre.leggauss(self.n_quad)[0] # Gauss-Legendre points (GLQP) in (-1, 1) + self.wts_loc = np.polynomial.legendre.leggauss(self.n_quad)[1] # Gauss-Legendre weights (GLQW) in (-1, 1) # global GLQP in format (element, local point) and total number of GLQP self.pts = bsp.quadrature_grid(self.el_b, self.pts_loc, self.wts_loc)[0] @@ -177,8 +178,8 @@ def __init__(self, Nel, p, spl_kind, n_quad=6, bc=["f", "f"]): d1 = self.NbaseD # boundary operators - self.B0 = xp.identity(n1, dtype=float) - self.B1 = xp.identity(d1, dtype=float) + self.B0 = np.identity(n1, dtype=float) + self.B1 = np.identity(d1, dtype=float) # extraction operators without boundary conditions self.E0 = spa.csr_matrix(self.B0.copy()) @@ -267,16 +268,16 @@ def evaluate_N(self, eta, coeff, kind=0): coeff = self.E0_0.T.dot(coeff) if isinstance(eta, float): - pts = xp.array([eta]) - elif isinstance(eta, xp.ndarray): + pts = np.array([eta]) + elif isinstance(eta, np.ndarray): pts = eta.flatten() - values = xp.empty(pts.size, dtype=float) + values = np.empty(pts.size, dtype=float) eva_1d.evaluate_vector(self.T, self.p, self.indN, coeff, pts, values, kind) if isinstance(eta, float): values = values[0] - elif isinstance(eta, xp.ndarray): + elif isinstance(eta, np.ndarray): values = values.reshape(eta.shape) return values @@ -303,16 +304,16 @@ def evaluate_D(self, eta, coeff): assert coeff.size == self.E1.shape[0] if isinstance(eta, float): - pts = xp.array([eta]) - elif isinstance(eta, xp.ndarray): + pts = np.array([eta]) + elif isinstance(eta, np.ndarray): pts = eta.flatten() - values = xp.empty(pts.size, dtype=float) + values = np.empty(pts.size, dtype=float) eva_1d.evaluate_vector(self.t, self.p - 1, self.indD, coeff, pts, values, 1) if isinstance(eta, float): values = values[0] - elif isinstance(eta, xp.ndarray): + elif isinstance(eta, np.ndarray): values = 
values.reshape(eta.shape) return values @@ -331,12 +332,12 @@ def plot_splines(self, n_pts=500, which="N"): which basis to plot. 'N', 'D' or 'dN' (optional, default='N') """ - etaplot = xp.linspace(0.0, 1.0, n_pts) + etaplot = np.linspace(0.0, 1.0, n_pts) degree = self.p if which == "N": - coeff = xp.zeros(self.NbaseN, dtype=float) + coeff = np.zeros(self.NbaseN, dtype=float) for i in range(self.NbaseN): coeff[:] = 0.0 @@ -344,7 +345,7 @@ def plot_splines(self, n_pts=500, which="N"): plt.plot(etaplot, self.evaluate_N(etaplot, coeff), label=str(i)) elif which == "D": - coeff = xp.zeros(self.NbaseD, dtype=float) + coeff = np.zeros(self.NbaseD, dtype=float) for i in range(self.NbaseD): coeff[:] = 0.0 @@ -354,7 +355,7 @@ def plot_splines(self, n_pts=500, which="N"): degree = self.p - 1 elif which == "dN": - coeff = xp.zeros(self.NbaseN, dtype=float) + coeff = np.zeros(self.NbaseN, dtype=float) for i in range(self.NbaseN): coeff[:] = 0.0 @@ -369,8 +370,8 @@ def plot_splines(self, n_pts=500, which="N"): else: bcs = "clamped" - (greville,) = plt.plot(self.greville, xp.zeros(self.greville.shape), "ro", label="greville") - (breaks,) = plt.plot(self.el_b, xp.zeros(self.el_b.shape), "k+", label="breaks") + (greville,) = plt.plot(self.greville, np.zeros(self.greville.shape), "ro", label="greville") + (breaks,) = plt.plot(self.el_b, np.zeros(self.el_b.shape), "k+", label="breaks") plt.title(which + f"$^{degree}$-splines, " + bcs + f", Nel={self.Nel}") plt.legend(handles=[greville, breaks]) @@ -554,8 +555,8 @@ def __init__(self, spline_spaces, ck=-1, cx=None, cy=None, n_tor=0, basis_tor="r self.M1_tor = spa.identity(1, format="csr") else: - self.M0_tor = spa.csr_matrix(xp.identity(2) / 2) - self.M1_tor = spa.csr_matrix(xp.identity(2) / 2) + self.M0_tor = spa.csr_matrix(np.identity(2) / 2) + self.M1_tor = spa.csr_matrix(np.identity(2) / 2) else: self.M0_tor = mass_1d.get_M(self.spaces[2], 0, 0) @@ -712,33 +713,27 @@ def __init__(self, spline_spaces, ck=-1, cx=None, cy=None, n_tor=0, basis_tor="r # extraction operators for 3D diagram: without boundary conditions self.E0 = spa.kron(self.E0_pol, self.E0_tor, format="csr") self.E1 = spa.bmat( - [[spa.kron(self.E1_pol, self.E0_tor), None], [None, spa.kron(self.E0_pol, self.E1_tor)]], - format="csr", + [[spa.kron(self.E1_pol, self.E0_tor), None], [None, spa.kron(self.E0_pol, self.E1_tor)]], format="csr" ) self.E2 = spa.bmat( - [[spa.kron(self.E2_pol, self.E1_tor), None], [None, spa.kron(self.E3_pol, self.E0_tor)]], - format="csr", + [[spa.kron(self.E2_pol, self.E1_tor), None], [None, spa.kron(self.E3_pol, self.E0_tor)]], format="csr" ) self.E3 = spa.kron(self.E3_pol, self.E1_tor, format="csr") self.Ev = spa.bmat( - [[spa.kron(self.Ev_pol, self.E0_tor), None], [None, spa.kron(self.E0_pol, self.E0_tor)]], - format="csr", + [[spa.kron(self.Ev_pol, self.E0_tor), None], [None, spa.kron(self.E0_pol, self.E0_tor)]], format="csr" ) # boundary operators for 3D diagram self.B0 = spa.kron(self.B0_pol, self.B0_tor, format="csr") self.B1 = spa.bmat( - [[spa.kron(self.B1_pol, self.B0_tor), None], [None, spa.kron(self.B0_pol, self.B1_tor)]], - format="csr", + [[spa.kron(self.B1_pol, self.B0_tor), None], [None, spa.kron(self.B0_pol, self.B1_tor)]], format="csr" ) self.B2 = spa.bmat( - [[spa.kron(self.B2_pol, self.B1_tor), None], [None, spa.kron(self.B3_pol, self.B0_tor)]], - format="csr", + [[spa.kron(self.B2_pol, self.B1_tor), None], [None, spa.kron(self.B3_pol, self.B0_tor)]], format="csr" ) self.B3 = spa.kron(self.B3_pol, self.B1_tor, format="csr") self.Bv = spa.bmat( - 
[[spa.kron(self.Bv_pol, self.E0_tor), None], [None, spa.kron(Bv3, self.B0_tor)]], - format="csr", + [[spa.kron(self.Bv_pol, self.E0_tor), None], [None, spa.kron(Bv3, self.B0_tor)]], format="csr" ) # extraction operators for 3D diagram: with boundary conditions @@ -791,7 +786,7 @@ def apply_M1_ten(self, x, mats): out1 = mats[0][1].dot(mats[0][0].dot(x1).T).T out2 = mats[1][1].dot(mats[1][0].dot(x2).T).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def apply_M2_ten(self, x, mats): """ @@ -803,7 +798,7 @@ def apply_M2_ten(self, x, mats): out1 = mats[0][1].dot(mats[0][0].dot(x1).T).T out2 = mats[1][1].dot(mats[1][0].dot(x2).T).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def apply_M3_ten(self, x, mats): """ @@ -826,7 +821,7 @@ def apply_Mv_ten(self, x, mats): out1 = mats[0][1].dot(mats[0][0].dot(x1).T).T out2 = mats[1][1].dot(mats[1][0].dot(x2).T).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def apply_M0_0_ten(self, x, mats): """ @@ -847,13 +842,13 @@ def apply_M1_0_ten(self, x, mats): x1, x2 = self.reshape_pol_1(x) out1 = self.B0_tor.dot( - mats[0][1].dot(self.B0_tor.T.dot(self.B1_pol.dot(mats[0][0].dot(self.B1_pol.T.dot(x1))).T)), + mats[0][1].dot(self.B0_tor.T.dot(self.B1_pol.dot(mats[0][0].dot(self.B1_pol.T.dot(x1))).T)) ).T out2 = self.B1_tor.dot( - mats[1][1].dot(self.B1_tor.T.dot(self.B0_pol.dot(mats[1][0].dot(self.B0_pol.T.dot(x2))).T)), + mats[1][1].dot(self.B1_tor.T.dot(self.B0_pol.dot(mats[1][0].dot(self.B0_pol.T.dot(x2))).T)) ).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def apply_M2_0_ten(self, x, mats): """ @@ -863,13 +858,13 @@ def apply_M2_0_ten(self, x, mats): x1, x2 = self.reshape_pol_2(x) out1 = self.B1_tor.dot( - mats[0][1].dot(self.B1_tor.T.dot(self.B2_pol.dot(mats[0][0].dot(self.B2_pol.T.dot(x1))).T)), + mats[0][1].dot(self.B1_tor.T.dot(self.B2_pol.dot(mats[0][0].dot(self.B2_pol.T.dot(x1))).T)) ).T out2 = self.B0_tor.dot( - mats[1][1].dot(self.B0_tor.T.dot(self.B3_pol.dot(mats[1][0].dot(self.B3_pol.T.dot(x2))).T)), + mats[1][1].dot(self.B0_tor.T.dot(self.B3_pol.dot(mats[1][0].dot(self.B3_pol.T.dot(x2))).T)) ).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def apply_M3_0_ten(self, x, mats): """ @@ -892,7 +887,7 @@ def apply_Mv_0_ten(self, x, mats): out1 = mats[0][1].dot(self.Bv_pol.dot(mats[0][0].dot(self.Bv_pol.T.dot(x1))).T).T out2 = self.B0_tor.dot(mats[1][1].dot(self.B0_tor.T.dot(mats[1][0].dot(x2).T))).T - return xp.concatenate((out1.flatten(), out2.flatten())) + return np.concatenate((out1.flatten(), out2.flatten())) def __assemble_M0(self, domain, as_tensor=False): """ @@ -934,12 +929,10 @@ def __assemble_M1(self, domain, as_tensor=False): self.M1_pol_mat = mass_2d.get_M1(self, domain) matvec = lambda x: self.apply_M1_ten( - x, - [[self.M1_pol_mat[0], self.M0_tor], [self.M1_pol_mat[1], self.M1_tor]], + x, [[self.M1_pol_mat[0], self.M0_tor], [self.M1_pol_mat[1], self.M1_tor]] ) matvec_0 = lambda x: self.apply_M1_0_ten( - x, - [[self.M1_pol_mat[0], self.M0_tor], [self.M1_pol_mat[1], self.M1_tor]], + x, [[self.M1_pol_mat[0], self.M0_tor], [self.M1_pol_mat[1], self.M1_tor]] ) # 3D @@ -947,8 +940,7 @@ def __assemble_M1(self, domain, as_tensor=False): if self.dim == 2: M11, M22 = mass_2d.get_M1(self, domain) 
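The spa.kron blocks and the apply_M*_ten matvecs above are two forms of the same operator: with C-ordered flattening, kron(A, B) applied to X.flatten() equals (A @ X) @ B.T flattened, which is exactly what the nested mats[...].dot(mats[...].dot(X).T).T pattern computes without ever forming the Kronecker product. A quick check of that identity:

import numpy as np
import scipy.sparse as spa

A = spa.random(4, 4, density=0.5, format="csr", random_state=0)
B = spa.random(3, 3, density=0.5, format="csr", random_state=1)
X = np.random.default_rng(2).standard_normal((4, 3))

full = spa.kron(A, B).dot(X.flatten())        # forms the (12 x 12) product
factored = B.dot(A.dot(X).T).T.flatten()      # applies the factors separately
assert np.allclose(full, factored)

Keeping the poloidal and toroidal factors separate is what makes the matvec_0 lambdas cheap: each application costs two small matrix products instead of one large Kronecker product.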
self.M1_mat = spa.bmat( - [[spa.kron(M11, self.M0_tor), None], [None, spa.kron(M22, self.M1_tor)]], - format="csr", + [[spa.kron(M11, self.M0_tor), None], [None, spa.kron(M22, self.M1_tor)]], format="csr" ) else: self.M1_mat = mass_3d.get_M1(self, domain) @@ -972,12 +964,10 @@ def __assemble_M2(self, domain, as_tensor=False): self.M2_pol_mat = mass_2d.get_M2(self, domain) matvec = lambda x: self.apply_M2_ten( - x, - [[self.M2_pol_mat[0], self.M1_tor], [self.M2_pol_mat[1], self.M0_tor]], + x, [[self.M2_pol_mat[0], self.M1_tor], [self.M2_pol_mat[1], self.M0_tor]] ) matvec_0 = lambda x: self.apply_M2_0_ten( - x, - [[self.M2_pol_mat[0], self.M1_tor], [self.M2_pol_mat[1], self.M0_tor]], + x, [[self.M2_pol_mat[0], self.M1_tor], [self.M2_pol_mat[1], self.M0_tor]] ) # 3D @@ -985,8 +975,7 @@ def __assemble_M2(self, domain, as_tensor=False): if self.dim == 2: M11, M22 = mass_2d.get_M2(self, domain) self.M2_mat = spa.bmat( - [[spa.kron(M11, self.M1_tor), None], [None, spa.kron(M22, self.M0_tor)]], - format="csr", + [[spa.kron(M11, self.M1_tor), None], [None, spa.kron(M22, self.M0_tor)]], format="csr" ) else: self.M2_mat = mass_3d.get_M2(self, domain) @@ -1038,12 +1027,10 @@ def __assemble_Mv(self, domain, as_tensor=False): self.Mv_pol_mat = mass_2d.get_Mv(self, domain) matvec = lambda x: self.apply_Mv_ten( - x, - [[self.Mv_pol_mat[0], self.M0_tor], [self.Mv_pol_mat[1], self.M0_tor]], + x, [[self.Mv_pol_mat[0], self.M0_tor], [self.Mv_pol_mat[1], self.M0_tor]] ) matvec_0 = lambda x: self.apply_Mv_0_ten( - x, - [[self.Mv_pol_mat[0], self.M0_tor], [self.Mv_pol_mat[1], self.M0_tor]], + x, [[self.Mv_pol_mat[0], self.M0_tor], [self.Mv_pol_mat[1], self.M0_tor]] ) # 3D @@ -1051,8 +1038,7 @@ def __assemble_Mv(self, domain, as_tensor=False): if self.dim == 2: M11, M22 = mass_2d.get_Mv(self, domain) self.Mv_mat = spa.bmat( - [[spa.kron(M11, self.M0_tor), None], [None, spa.kron(M22, self.M0_tor)]], - format="csr", + [[spa.kron(M11, self.M0_tor), None], [None, spa.kron(M22, self.M0_tor)]], format="csr" ) else: self.Mv_mat = mass_3d.get_Mv(self, domain) @@ -1108,21 +1094,17 @@ def reshape_pol_1(self, coeff): if c_size == self.E1.shape[0]: coeff1_pol_1 = coeff[: self.E1_pol.shape[0] * self.E0_tor.shape[0]].reshape( - self.E1_pol.shape[0], - self.E0_tor.shape[0], + self.E1_pol.shape[0], self.E0_tor.shape[0] ) coeff1_pol_3 = coeff[self.E1_pol.shape[0] * self.E0_tor.shape[0] :].reshape( - self.E0_pol.shape[0], - self.E1_tor.shape[0], + self.E0_pol.shape[0], self.E1_tor.shape[0] ) else: coeff1_pol_1 = coeff[: self.E1_pol_0.shape[0] * self.E0_tor_0.shape[0]].reshape( - self.E1_pol_0.shape[0], - self.E0_tor_0.shape[0], + self.E1_pol_0.shape[0], self.E0_tor_0.shape[0] ) coeff1_pol_3 = coeff[self.E1_pol_0.shape[0] * self.E0_tor_0.shape[0] :].reshape( - self.E0_pol_0.shape[0], - self.E1_tor_0.shape[0], + self.E0_pol_0.shape[0], self.E1_tor_0.shape[0] ) return coeff1_pol_1, coeff1_pol_3 @@ -1138,21 +1120,17 @@ def reshape_pol_2(self, coeff): if c_size == self.E2.shape[0]: coeff2_pol_1 = coeff[: self.E2_pol.shape[0] * self.E1_tor.shape[0]].reshape( - self.E2_pol.shape[0], - self.E1_tor.shape[0], + self.E2_pol.shape[0], self.E1_tor.shape[0] ) coeff2_pol_3 = coeff[self.E2_pol.shape[0] * self.E1_tor.shape[0] :].reshape( - self.E3_pol.shape[0], - self.E0_tor.shape[0], + self.E3_pol.shape[0], self.E0_tor.shape[0] ) else: coeff2_pol_1 = coeff[: self.E2_pol_0.shape[0] * self.E1_tor_0.shape[0]].reshape( - self.E2_pol_0.shape[0], - self.E1_tor_0.shape[0], + self.E2_pol_0.shape[0], self.E1_tor_0.shape[0] ) coeff2_pol_3 = 
coeff[self.E2_pol_0.shape[0] * self.E1_tor_0.shape[0] :].reshape( - self.E3_pol_0.shape[0], - self.E0_tor_0.shape[0], + self.E3_pol_0.shape[0], self.E0_tor_0.shape[0] ) return coeff2_pol_1, coeff2_pol_3 @@ -1184,22 +1162,18 @@ def reshape_pol_v(self, coeff): if c_size == self.Ev.shape[0]: coeffv_pol_1 = coeff[: self.Ev_pol.shape[0] * self.E0_tor.shape[0]].reshape( - self.Ev_pol.shape[0], - self.E0_tor.shape[0], + self.Ev_pol.shape[0], self.E0_tor.shape[0] ) coeffv_pol_3 = coeff[self.Ev_pol.shape[0] * self.E0_tor.shape[0] :].reshape( - self.E0_pol.shape[0], - self.E0_tor.shape[0], + self.E0_pol.shape[0], self.E0_tor.shape[0] ) else: coeffv_pol_1 = coeff[: self.Ev_pol_0.shape[0] * self.E0_tor.shape[0]].reshape( - self.Ev_pol_0.shape[0], - self.E0_tor.shape[0], + self.Ev_pol_0.shape[0], self.E0_tor.shape[0] ) coeffv_pol_3 = coeff[self.Ev_pol_0.shape[0] * self.E0_tor.shape[0] :].reshape( - self.E0_pol.shape[0], - self.E0_tor_0.shape[0], + self.E0_pol.shape[0], self.E0_tor_0.shape[0] ) return coeffv_pol_1, coeffv_pol_3 @@ -1255,7 +1229,7 @@ def extract_1(self, coeff): else: coeff1 = self.E1_0.T.dot(coeff) - coeff1_1, coeff1_2, coeff1_3 = xp.split(coeff1, [self.Ntot_1form_cum[0], self.Ntot_1form_cum[1]]) + coeff1_1, coeff1_2, coeff1_3 = np.split(coeff1, [self.Ntot_1form_cum[0], self.Ntot_1form_cum[1]]) coeff1_1 = coeff1_1.reshape(self.Nbase_1form[0]) coeff1_2 = coeff1_2.reshape(self.Nbase_1form[1]) @@ -1286,7 +1260,7 @@ def extract_2(self, coeff): else: coeff2 = self.E2_0.T.dot(coeff) - coeff2_1, coeff2_2, coeff2_3 = xp.split(coeff2, [self.Ntot_2form_cum[0], self.Ntot_2form_cum[1]]) + coeff2_1, coeff2_2, coeff2_3 = np.split(coeff2, [self.Ntot_2form_cum[0], self.Ntot_2form_cum[1]]) coeff2_1 = coeff2_1.reshape(self.Nbase_2form[0]) coeff2_2 = coeff2_2.reshape(self.Nbase_2form[1]) @@ -1331,7 +1305,7 @@ def extract_v(self, coeff): else: coeffv = self.Ev_0.T.dot(coeff) - coeffv_1, coeffv_2, coeffv_3 = xp.split(coeffv, [self.Ntot_0form, 2 * self.Ntot_0form]) + coeffv_1, coeffv_2, coeffv_3 = np.split(coeffv, [self.Ntot_0form, 2 * self.Ntot_0form]) coeffv_1 = coeffv_1.reshape(self.Nbase_0form) coeffv_2 = coeffv_2.reshape(self.Nbase_0form) @@ -1384,15 +1358,15 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): assert coeff.shape[:2] == (self.NbaseN[0], self.NbaseN[1]) # get real and imaginary part - coeff_r = xp.real(coeff) - coeff_i = xp.imag(coeff) + coeff_r = np.real(coeff) + coeff_i = np.imag(coeff) # ------ evaluate FEM field at given points -------- - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor-product evaluation if eta1.ndim == 1: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.T[0], @@ -1422,8 +1396,8 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.T[0], @@ -1454,8 +1428,8 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): # matrix evaluation else: - values_r_1 = 
xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.T[0], @@ -1485,8 +1459,8 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.T[0], @@ -1517,15 +1491,15 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): # multiply with Fourier basis in third direction if self.n_tor == 0: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.ones(eta3.shape, dtype=float) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.ones(eta3.shape, dtype=float) else: if self.basis_tor == "r": - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (values_r_2 + 1j * values_i_2)[:, :, None] * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.cos(2 * np.pi * self.n_tor * eta3) + out += (values_r_2 + 1j * values_i_2)[:, :, None] * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # --------- evaluate FEM field at given point ------- else: @@ -1554,26 +1528,10 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): if self.n_tor != 0 and self.basis_tor == "r": real_2 = eva_2d.evaluate_n_n( - self.T[0], - self.T[1], - self.p[0], - self.p[1], - self.indN[0], - self.indN[1], - coeff_r[:, :, 1], - eta1, - eta2, + self.T[0], self.T[1], self.p[0], self.p[1], self.indN[0], self.indN[1], coeff_r[:, :, 1], eta1, eta2 ) imag_2 = eva_2d.evaluate_n_n( - self.T[0], - self.T[1], - self.p[0], - self.p[1], - self.indN[0], - self.indN[1], - coeff_i[:, :, 1], - eta1, - eta2, + self.T[0], self.T[1], self.p[0], self.p[1], self.indN[0], self.indN[1], coeff_i[:, :, 1], eta1, eta2 ) # multiply with Fourier basis in third direction if |n_tor| > 0 @@ -1582,17 +1540,17 @@ def evaluate_NN(self, eta1, eta2, eta3, coeff, which="V0", part="r"): else: if self.basis_tor == "r": - out = (real_1 + 1j * imag_1) * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (real_2 + 1j * imag_2) * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.cos(2 * np.pi * self.n_tor * eta3) + out += (real_2 + 1j * imag_2) * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (real_1 + 1j * imag_1) * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # return real or imaginary part if part == "r": - out = xp.real(out) + out = np.real(out) else: - out = xp.imag(out) + out = np.imag(out) return out @@ -1641,15 +1599,15 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): assert coeff.shape[:2] == (self.NbaseD[0], self.NbaseN[1]) # get real and imaginary part - coeff_r = xp.real(coeff) - coeff_i = xp.imag(coeff) + coeff_r = np.real(coeff) + coeff_i = np.imag(coeff) # ------ evaluate FEM field at given points -------- - if isinstance(eta1, 
xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor-product evaluation if eta1.ndim == 1: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.t[0], @@ -1679,8 +1637,8 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.t[0], @@ -1711,8 +1669,8 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): # matrix evaluation else: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.t[0], @@ -1742,8 +1700,8 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.t[0], @@ -1774,15 +1732,15 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): # multiply with Fourier basis in third direction if self.n_tor == 0: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.ones(eta3.shape, dtype=float) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.ones(eta3.shape, dtype=float) else: if self.basis_tor == "r": - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (values_r_2 + 1j * values_i_2)[:, :, None] * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.cos(2 * np.pi * self.n_tor * eta3) + out += (values_r_2 + 1j * values_i_2)[:, :, None] * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # --------- evaluate FEM field at given point ------- else: @@ -1839,17 +1797,17 @@ def evaluate_DN(self, eta1, eta2, eta3, coeff, which="V1", part="r"): else: if self.basis_tor == "r": - out = (real_1 + 1j * imag_1) * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (real_2 + 1j * imag_2) * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.cos(2 * np.pi * self.n_tor * eta3) + out += (real_2 + 1j * imag_2) * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (real_1 + 1j * imag_1) * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # return real or imaginary part if part == "r": - out = xp.real(out) + out = np.real(out) else: - out = xp.imag(out) + out = np.imag(out) return out @@ -1898,15 +1856,15 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, 
which="V2", part="r"): assert coeff.shape[:2] == (self.NbaseN[0], self.NbaseD[1]) # get real and imaginary part - coeff_r = xp.real(coeff) - coeff_i = xp.imag(coeff) + coeff_r = np.real(coeff) + coeff_i = np.imag(coeff) # ------ evaluate FEM field at given points -------- - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor-product evaluation if eta1.ndim == 1: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.T[0], @@ -1936,8 +1894,8 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, which="V2", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.T[0], @@ -1968,8 +1926,8 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, which="V2", part="r"): # matrix evaluation else: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.T[0], @@ -1999,8 +1957,8 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, which="V2", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.T[0], @@ -2031,15 +1989,15 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, which="V2", part="r"): # multiply with Fourier basis in third direction if self.n_tor == 0: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.ones(eta3.shape, dtype=float) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.ones(eta3.shape, dtype=float) else: if self.basis_tor == "r": - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (values_r_2 + 1j * values_i_2)[:, :, None] * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.cos(2 * np.pi * self.n_tor * eta3) + out += (values_r_2 + 1j * values_i_2)[:, :, None] * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # --------- evaluate FEM field at given point ------- else: @@ -2096,17 +2054,17 @@ def evaluate_ND(self, eta1, eta2, eta3, coeff, which="V2", part="r"): else: if self.basis_tor == "r": - out = (real_1 + 1j * imag_1) * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (real_2 + 1j * imag_2) * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.cos(2 * np.pi * self.n_tor * eta3) + out += (real_2 + 1j * imag_2) * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (real_1 + 1j * imag_1) * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = 
(real_1 + 1j * imag_1) * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # return real or imaginary part if part == "r": - out = xp.real(out) + out = np.real(out) else: - out = xp.imag(out) + out = np.imag(out) return out @@ -2158,15 +2116,15 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): assert coeff.shape[:2] == (self.NbaseD[0], self.NbaseD[1]) # get real and imaginary part - coeff_r = xp.real(coeff) - coeff_i = xp.imag(coeff) + coeff_r = np.real(coeff) + coeff_i = np.imag(coeff) # ------ evaluate FEM field at given points -------- - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor-product evaluation if eta1.ndim == 1: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.t[0], @@ -2196,8 +2154,8 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[0]), dtype=float) eva_2d.evaluate_tensor_product_2d( self.t[0], @@ -2228,8 +2186,8 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): # matrix evaluation else: - values_r_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_1 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_1 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.t[0], @@ -2259,8 +2217,8 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): ) if self.n_tor != 0 and self.basis_tor == "r": - values_r_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) - values_i_2 = xp.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_r_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) + values_i_2 = np.empty((eta1.shape[0], eta2.shape[1]), dtype=float) eva_2d.evaluate_matrix_2d( self.t[0], @@ -2291,15 +2249,15 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): # multiply with Fourier basis in third direction if self.n_tor == 0: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.ones(eta3.shape, dtype=float) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.ones(eta3.shape, dtype=float) else: if self.basis_tor == "r": - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (values_r_2 + 1j * values_i_2)[:, :, None] * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.cos(2 * np.pi * self.n_tor * eta3) + out += (values_r_2 + 1j * values_i_2)[:, :, None] * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (values_r_1 + 1j * values_i_1)[:, :, None] * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (values_r_1 + 1j * values_i_1)[:, :, None] * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # --------- evaluate FEM field at given point ------- else: @@ -2356,17 +2314,17 @@ def evaluate_DD(self, eta1, eta2, eta3, coeff, which="V3", part="r"): else: if self.basis_tor == "r": - out = (real_1 + 1j * imag_1) * xp.cos(2 * xp.pi * self.n_tor * eta3) - out += (real_2 + 
1j * imag_2) * xp.sin(2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.cos(2 * np.pi * self.n_tor * eta3) + out += (real_2 + 1j * imag_2) * np.sin(2 * np.pi * self.n_tor * eta3) else: - out = (real_1 + 1j * imag_1) * xp.exp(1j * 2 * xp.pi * self.n_tor * eta3) + out = (real_1 + 1j * imag_1) * np.exp(1j * 2 * np.pi * self.n_tor * eta3) # return real or imaginary part if part == "r": - out = xp.real(out) + out = np.real(out) else: - out = xp.imag(out) + out = np.imag(out) return out @@ -2377,13 +2335,13 @@ def evaluate_NNN(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2398,10 +2356,10 @@ def evaluate_NNN(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_0(coeff) - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor-product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.T[0], self.T[1], @@ -2422,7 +2380,7 @@ def evaluate_NNN(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -2492,13 +2450,13 @@ def evaluate_DNN(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2513,10 +2471,10 @@ def evaluate_DNN(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_1(coeff)[0] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.t[0], self.T[1], @@ -2537,7 +2495,7 @@ def evaluate_DNN(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. 
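            # --- editor's aside (not part of the patch): a sparse meshgrid keeps
            # singleton axes, so each coordinate array satisfies max(shape) == size,
            # which is what the check below exploits; a dense meshgrid does not:
            #
            #     e1, e2, e3 = np.meshgrid(x1, x2, x3, indexing="ij", sparse=True)
            #     # e1.shape == (len(x1), 1, 1)   ->  max(e1.shape) == e1.size
            #     E1, E2, E3 = np.meshgrid(x1, x2, x3, indexing="ij")
            #     # E1.shape == (len(x1), len(x2), len(x3))  ->  max(E1.shape) != E1.size
            # ---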
if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -2606,13 +2564,13 @@ def evaluate_NDN(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2627,10 +2585,10 @@ def evaluate_NDN(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_1(coeff)[1] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.T[0], self.t[1], @@ -2651,7 +2609,7 @@ def evaluate_NDN(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -2720,13 +2678,13 @@ def evaluate_NND(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2741,10 +2699,10 @@ def evaluate_NND(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_1(coeff)[2] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.T[0], self.T[1], @@ -2765,7 +2723,7 @@ def evaluate_NND(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. 
if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -2834,13 +2792,13 @@ def evaluate_NDD(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2855,10 +2813,10 @@ def evaluate_NDD(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_2(coeff)[0] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.T[0], self.t[1], @@ -2879,7 +2837,7 @@ def evaluate_NDD(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -2948,13 +2906,13 @@ def evaluate_DND(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -2969,10 +2927,10 @@ def evaluate_DND(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_2(coeff)[1] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.t[0], self.T[1], @@ -2993,7 +2951,7 @@ def evaluate_DND(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. 
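            # --- editor's aside (not part of the patch): all evaluate_* methods accept
            # two array modes, dispatched on eta1.ndim: 1d arrays trigger tensor-product
            # evaluation with output shape (eta1.size, eta2.size, eta3.size), while 3d
            # meshgrid arrays trigger "matrix" evaluation with output shape
            # (eta1.shape[0], eta2.shape[1], eta3.shape[2]); sparse grids are detected
            # as below. Hypothetical call illustrating the first mode, with `space` an
            # instance of this class:
            #
            #     e = np.linspace(0.0, 1.0, 16)
            #     vals = space.evaluate_DND(e, e, e, coeff)   # vals.shape == (16, 16, 16)
            # ---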
if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -3062,13 +3020,13 @@ def evaluate_DDN(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -3083,10 +3041,10 @@ def evaluate_DDN(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_2(coeff)[2] - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.t[0], self.t[1], @@ -3107,7 +3065,7 @@ def evaluate_DDN(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( @@ -3176,13 +3134,13 @@ def evaluate_DDD(self, eta1, eta2, eta3, coeff): Parameters ---------- - eta1 : double or xp.ndarray + eta1 : double or np.ndarray 1st component of logical evaluation point - eta2 : double or xp.ndarray + eta2 : double or np.ndarray 2nd component of logical evaluation point - eta3 : double or xp.ndarray + eta3 : double or np.ndarray 3rd component of logical evaluation point coeff : array_like @@ -3197,10 +3155,10 @@ def evaluate_DDD(self, eta1, eta2, eta3, coeff): if coeff.ndim == 1: coeff = self.extract_3(coeff) - if isinstance(eta1, xp.ndarray): + if isinstance(eta1, np.ndarray): # tensor product evaluation if eta1.ndim == 1: - values = xp.empty((eta1.size, eta2.size, eta3.size), dtype=float) + values = np.empty((eta1.size, eta2.size, eta3.size), dtype=float) eva_3d.evaluate_tensor_product( self.t[0], self.t[1], @@ -3221,7 +3179,7 @@ def evaluate_DDD(self, eta1, eta2, eta3, coeff): # matrix evaluation else: - values = xp.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) + values = np.empty((eta1.shape[0], eta2.shape[1], eta3.shape[2]), dtype=float) # `eta1` is a sparse meshgrid. if max(eta1.shape) == eta1.size: eva_3d.evaluate_sparse( diff --git a/src/struphy/examples/_draw_parallel.py b/src/struphy/examples/_draw_parallel.py index b31ba19c4..b2f3cef83 100644 --- a/src/struphy/examples/_draw_parallel.py +++ b/src/struphy/examples/_draw_parallel.py @@ -1,9 +1,9 @@ -import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.geometry import domains from struphy.pic.particles import Particles6D +from struphy.utils.arrays import xp as np def main(): @@ -69,19 +69,19 @@ def main(): ) # are all markers in the correct domain? 
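    # --- editor's aside (not part of the patch): the check below keeps a marker on
    # this process only if all three logical coordinates lie strictly inside the
    # rank's box; columns 0::3 / 1::3 of `domain_array` hold the lower / upper
    # bounds per direction. Schematically:
    #
    #     lo = derham.domain_array[rank, 0::3]          # (3,) lower bounds
    #     hi = derham.domain_array[rank, 1::3]          # (3,) upper bounds
    #     stay = np.all((markers[:, :3] > lo) & (markers[:, :3] < hi), axis=1)
    #
    # Holes (unused marker rows) are flagged by a first coordinate of -1.0 and
    # must be excluded, as done below. ---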
- conds = xp.logical_and( + conds = np.logical_and( particles.markers[:, :3] > derham.domain_array[rank, 0::3], particles.markers[:, :3] < derham.domain_array[rank, 1::3], ) holes = particles.markers[:, 0] == -1.0 - stay = xp.all(conds, axis=1) + stay = np.all(conds, axis=1) - error_mks = particles.markers[xp.logical_and(~stay, ~holes)] + error_mks = particles.markers[np.logical_and(~stay, ~holes)] print( - f"rank {rank} | markers not on correct process: {xp.nonzero(xp.logical_and(~stay, ~holes))} \ - \n corresponding positions:\n {error_mks[:, :3]}", + f"rank {rank} | markers not on correct process: {np.nonzero(np.logical_and(~stay, ~holes))} \ + \n corresponding positions:\n {error_mks[:, :3]}" ) assert error_mks.size == 0 diff --git a/src/struphy/examples/restelli2018/callables.py b/src/struphy/examples/restelli2018/callables.py index 505a60f90..4a4b3d5c5 100644 --- a/src/struphy/examples/restelli2018/callables.py +++ b/src/struphy/examples/restelli2018/callables.py @@ -1,6 +1,6 @@ "Analytical callables needed for the simulation of the Two-Fluid Quasi-Neutral Model by Restelli." -import cunumpy as xp +from struphy.utils.arrays import xp as np class RestelliForcingTerm: @@ -74,9 +74,9 @@ def __init__(self, nu=1.0, R0=2.0, a=1.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0, self._eps_norm = eps def __call__(self, x, y, z): - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) force_Z = self._nu * ( self._alpha * (self._R0 - 4 * R) / (self._a * self._R0 * R) - self._beta * self._Bp * self._R0**2 / (self._B0 * self._a * R**3) @@ -197,31 +197,31 @@ def __init__( def __call__(self, x, y, z): A = self._alpha / (self._a * self._R0) C = self._beta * self._Bp * self._R0 / (self._B0 * self._a) - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) if self._species == "Ions": """Forceterm for ions on the right hand side.""" if self._dimension == "2D": fx = ( - -2.0 * xp.pi * xp.sin(2 * xp.pi * x) - + xp.cos(2 * xp.pi * x) * xp.cos(2 * xp.pi * y) * self._B0 / self._eps_norm - - self._nu * 8.0 * xp.pi**2 * xp.sin(2 * xp.pi * x) * xp.sin(2 * xp.pi * y) + -2.0 * np.pi * np.sin(2 * np.pi * x) + + np.cos(2 * np.pi * x) * np.cos(2 * np.pi * y) * self._B0 / self._eps_norm + - self._nu * 8.0 * np.pi**2 * np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y) ) fy = ( - 2.0 * xp.pi * xp.cos(2 * xp.pi * y) - - xp.sin(2 * xp.pi * x) * xp.sin(2 * xp.pi * y) * self._B0 / self._eps_norm - - self._nu * 8.0 * xp.pi**2 * xp.cos(2 * xp.pi * x) * xp.cos(2 * xp.pi * y) + 2.0 * np.pi * np.cos(2 * np.pi * y) + - np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y) * self._B0 / self._eps_norm + - self._nu * 8.0 * np.pi**2 * np.cos(2 * np.pi * x) * np.cos(2 * np.pi * y) ) fz = 0.0 * x elif self._dimension == "1D": fx = ( - 2.0 * xp.pi * xp.cos(2 * xp.pi * x) - + self._nu * 4.0 * xp.pi**2 * xp.sin(2 * xp.pi * x) - + (xp.sin(2 * xp.pi * x) + 1.0) / self._dt + 2.0 * np.pi * np.cos(2 * np.pi * x) + + self._nu * 4.0 * np.pi**2 * np.sin(2 * np.pi * x) + + (np.sin(2 * np.pi * x) + 1.0) / self._dt ) - fy = (xp.sin(2 * xp.pi * x) + 1.0) * self._B0 / self._eps_norm + fy = (np.sin(2 * np.pi * x) + 1.0) * self._B0 / self._eps_norm fz = 0.0 * x elif self._dimension == "Tokamak": @@ -234,8 +234,8 @@ def __call__(self, x, y, z): fZ = self._alpha * self._B0 * z / self._a + A * self._R0 / R * ((R - self._R0) * self._B0) fphi = 
A * self._R0 * self._Bp / (self._a * R**2) * ((R - self._R0) ** 2 + z**2) - fx = xp.cos(phi) * fR - R * xp.sin(phi) * fphi - fy = -xp.sin(phi) * fR - R * xp.cos(phi) * fphi + fx = np.cos(phi) * fR - R * np.sin(phi) * fphi + fy = -np.sin(phi) * fR - R * np.cos(phi) * fphi fz = fZ if self._comp == "0": @@ -251,26 +251,26 @@ def __call__(self, x, y, z): """Forceterm for electrons on the right hand side.""" if self._dimension == "2D": fx = ( - 2.0 * xp.pi * xp.sin(2 * xp.pi * x) - - xp.cos(4 * xp.pi * x) * xp.cos(4 * xp.pi * y) * self._B0 / self._eps_norm - - self._nu_e * 32.0 * xp.pi**2 * xp.sin(4 * xp.pi * x) * xp.sin(4 * xp.pi * y) - - self._stab_sigma * (-xp.sin(4 * xp.pi * x) * xp.sin(4 * xp.pi * y)) + 2.0 * np.pi * np.sin(2 * np.pi * x) + - np.cos(4 * np.pi * x) * np.cos(4 * np.pi * y) * self._B0 / self._eps_norm + - self._nu_e * 32.0 * np.pi**2 * np.sin(4 * np.pi * x) * np.sin(4 * np.pi * y) + - self._stab_sigma * (-np.sin(4 * np.pi * x) * np.sin(4 * np.pi * y)) ) fy = ( - -2.0 * xp.pi * xp.cos(2 * xp.pi * y) - + xp.sin(4 * xp.pi * x) * xp.sin(4 * xp.pi * y) * self._B0 / self._eps_norm - - self._nu_e * 32.0 * xp.pi**2 * xp.cos(4 * xp.pi * x) * xp.cos(4 * xp.pi * y) - - self._stab_sigma * (-xp.cos(4 * xp.pi * x) * xp.cos(4 * xp.pi * y)) + -2.0 * np.pi * np.cos(2 * np.pi * y) + + np.sin(4 * np.pi * x) * np.sin(4 * np.pi * y) * self._B0 / self._eps_norm + - self._nu_e * 32.0 * np.pi**2 * np.cos(4 * np.pi * x) * np.cos(4 * np.pi * y) + - self._stab_sigma * (-np.cos(4 * np.pi * x) * np.cos(4 * np.pi * y)) ) fz = 0.0 * x elif self._dimension == "1D": fx = ( - -2.0 * xp.pi * xp.cos(2 * xp.pi * x) - + self._nu_e * 4.0 * xp.pi**2 * xp.sin(2 * xp.pi * x) - - self._stab_sigma * xp.sin(2 * xp.pi * x) + -2.0 * np.pi * np.cos(2 * np.pi * x) + + self._nu_e * 4.0 * np.pi**2 * np.sin(2 * np.pi * x) + - self._stab_sigma * np.sin(2 * np.pi * x) ) - fy = -xp.sin(2 * xp.pi * x) * self._B0 / self._eps_norm + fy = -np.sin(2 * np.pi * x) * self._B0 / self._eps_norm fz = 0.0 * x elif self._dimension == "Tokamak": @@ -283,8 +283,8 @@ def __call__(self, x, y, z): fZ = -self._alpha * self._B0 * z / self._a - A * self._R0 / R * ((R - self._R0) * self._B0) fphi = -A * self._R0 * self._Bp / (self._a * R**2) * ((R - self._R0) ** 2 + z**2) - fx = xp.cos(phi) * fR - R * xp.sin(phi) * fphi - fy = -xp.sin(phi) * fR - R * xp.cos(phi) * fphi + fx = np.cos(phi) * fR - R * np.sin(phi) * fphi + fy = -np.sin(phi) * fR - R * np.cos(phi) * fphi fz = fZ if self._comp == "0": diff --git a/src/struphy/feec/basis_projection_ops.py b/src/struphy/feec/basis_projection_ops.py index ab0925828..8d4f24d54 100644 --- a/src/struphy/feec/basis_projection_ops.py +++ b/src/struphy/feec/basis_projection_ops.py @@ -1,4 +1,3 @@ -import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.fem.basic import FemSpace @@ -15,6 +14,7 @@ from struphy.feec.utilities import RotationMatrix from struphy.polar.basic import PolarDerhamSpace, PolarVector from struphy.polar.linear_operators import PolarExtractionOperator +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -51,7 +51,7 @@ def __init__(self, derham, domain, verbose=True, **weights): self._rank = derham.comm.Get_rank() if derham.comm is not None else 0 - if xp.any([p == 1 and Nel > 1 for p, Nel in zip(derham.p, derham.Nel)]): + if np.any([p == 1 and Nel > 1 for p, Nel in zip(derham.p, derham.Nel)]): if MPI.COMM_WORLD.Get_rank() == 0: print( f'\nWARNING: Class "BasisProjectionOperators" called 
with p={derham.p} (interpolation of piece-wise constants should be avoided).', @@ -147,7 +147,7 @@ def K3(self): e3, ) / self.sqrt_g(e1, e2, e3), - ], + ] ] self._K3 = self.create_basis_op( fun, @@ -263,7 +263,7 @@ def Q3(self): e3, ) / self.sqrt_g(e1, e2, e3), - ], + ] ] self._Q3 = self.create_basis_op( fun, @@ -1051,11 +1051,11 @@ def __init__( if isinstance(V, TensorFemSpace): self._Vspaces = [V.coeff_space] self._V1ds = [V.spaces] - self._VNbasis = xp.array([self._V1ds[0][0].nbasis, self._V1ds[0][1].nbasis, self._V1ds[0][2].nbasis]) + self._VNbasis = np.array([self._V1ds[0][0].nbasis, self._V1ds[0][1].nbasis, self._V1ds[0][2].nbasis]) else: self._Vspaces = V.coeff_space self._V1ds = [comp.spaces for comp in V.spaces] - self._VNbasis = xp.array( + self._VNbasis = np.array( [ [self._V1ds[0][0].nbasis, self._V1ds[0][1].nbasis, self._V1ds[0][2].nbasis], [ @@ -1064,7 +1064,7 @@ def __init__( self._V1ds[1][2].nbasis, ], [self._V1ds[2][0].nbasis, self._V1ds[2][1].nbasis, self._V1ds[2][2].nbasis], - ], + ] ) # output space: 3d StencilVectorSpaces and 1d SplineSpaces of each component @@ -1283,7 +1283,7 @@ def assemble(self, verbose=False): self._pds, self._periodic, self._p, - xp.array([col0, col1, col2]), + np.array([col0, col1, col2]), self._VNbasis, self._mat._data, coeff, @@ -1358,12 +1358,12 @@ def assemble(self, verbose=False): self._pds, self._periodic, self._p, - xp.array( + np.array( [ col0, col1, col2, - ], + ] ), self._VNbasis[hh], Aux._data, @@ -1438,12 +1438,12 @@ def assemble(self, verbose=False): self._pds[h], self._periodic, self._p, - xp.array( + np.array( [ col0, col1, col2, - ], + ] ), self._VNbasis, Aux[h]._data, @@ -1539,12 +1539,12 @@ def assemble(self, verbose=False): self._pds[h], self._periodic, self._p, - xp.array( + np.array( [ col0, col1, col2, - ], + ] ), self._VNbasis[hh], Aux[h]._data, @@ -1613,7 +1613,7 @@ class BasisProjectionOperator(LinOpWithTransp): Finite element spline space (domain, input space). weights : list - Weight function(s) (callables or xp.ndarrays) in a 2d list of shape corresponding to number of components of domain/codomain. + Weight function(s) (callables or np.ndarrays) in a 2d list of shape corresponding to number of components of domain/codomain. V_extraction_op : PolarExtractionOperator | IdentityOperator Extraction operator to polar sub-space of V. @@ -1889,7 +1889,7 @@ def update_weights(self, weights): Parameters ---------- weights : list - Weight function(s) (callables or xp.ndarrays) in a 2d list of shape corresponding to number of components of domain/codomain. + Weight function(s) (callables or np.ndarrays) in a 2d list of shape corresponding to number of components of domain/codomain. 
""" self._weights = weights @@ -1945,13 +1945,13 @@ def assemble(self, weights=None, verbose=False): # input vector space (domain), column of block for j, (Vspace, V1d, loc_weight) in enumerate(zip(_Vspaces, _V1ds, weight_line)): - _starts_in = xp.array(Vspace.starts) - _ends_in = xp.array(Vspace.ends) - _pads_in = xp.array(Vspace.pads) + _starts_in = np.array(Vspace.starts) + _ends_in = np.array(Vspace.ends) + _pads_in = np.array(Vspace.pads) - _starts_out = xp.array(Wspace.starts) - _ends_out = xp.array(Wspace.ends) - _pads_out = xp.array(Wspace.pads) + _starts_out = np.array(Wspace.starts) + _ends_out = np.array(Wspace.ends) + _pads_out = np.array(Wspace.pads) # use cached information if asked if self._use_cache: @@ -1998,21 +1998,21 @@ def assemble(self, weights=None, verbose=False): # Evaluate weight function at quadrature points # evaluate weight at quadrature points if callable(loc_weight): - PTS = xp.meshgrid(*_ptsG, indexing="ij") + PTS = np.meshgrid(*_ptsG, indexing="ij") mat_w = loc_weight(*PTS).copy() - elif isinstance(loc_weight, xp.ndarray): + elif isinstance(loc_weight, np.ndarray): assert loc_weight.shape == (len(_ptsG[0]), len(_ptsG[1]), len(_ptsG[2])) mat_w = loc_weight elif loc_weight is not None: raise TypeError( - "weights must be xp.ndarray, callable or None", + "weights must be np.ndarray, callable or None", ) # Call the kernel if weight function is not zero or in the scalar case # to avoid calling _block of a StencilMatrix in the else - not_weight_zero = xp.array( - int(loc_weight is not None and xp.any(xp.abs(mat_w) > 1e-14)), + not_weight_zero = np.array( + int(loc_weight is not None and np.any(np.abs(mat_w) > 1e-14)), ) if self._mpi_comm is not None: @@ -2043,7 +2043,7 @@ def assemble(self, weights=None, verbose=False): getattr( basis_projection_kernels, "assemble_dofs_for_weighted_basisfuns_" + str(V.ldim) + "d", - ), + ) ) if rank == 0 and verbose: @@ -2385,7 +2385,7 @@ def find_relative_col(col, row, Nbasis, periodic): The relative column position of col with respect to the the current row of the StencilMatrix. 
""" - if not periodic: + if periodic == False: relativecol = col - row # In the periodic case we must account for the possible looping of the basis functions when computing the relative row postion else: diff --git a/src/struphy/feec/linear_operators.py b/src/struphy/feec/linear_operators.py index 28b4a0805..d3c4770e4 100644 --- a/src/struphy/feec/linear_operators.py +++ b/src/struphy/feec/linear_operators.py @@ -1,7 +1,6 @@ import itertools from abc import abstractmethod -import cunumpy as xp from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI from psydac.linalg.basic import LinearOperator, Vector, VectorSpace @@ -11,6 +10,7 @@ from struphy.feec.utilities import apply_essential_bc_to_array from struphy.polar.basic import PolarDerhamSpace +from struphy.utils.arrays import xp as np class LinOpWithTransp(LinearOperator): @@ -63,17 +63,17 @@ def toarray_struphy(self, out=None, is_sparse=False, format="csr"): rank = comm.Get_rank() size = comm.Get_size() - if not is_sparse: + if is_sparse == False: if out is None: # We declare the matrix form of our linear operator - out = xp.zeros([self.codomain.dimension, self.domain.dimension], dtype=self.dtype) + out = np.zeros([self.codomain.dimension, self.domain.dimension], dtype=self.dtype) else: - assert isinstance(out, xp.ndarray) + assert isinstance(out, np.ndarray) assert out.shape[0] == self.codomain.dimension assert out.shape[1] == self.domain.dimension # We use this matrix to store the partial results that we shall combine into the final matrix with a reduction at the end - result = xp.zeros((self.codomain.dimension, self.domain.dimension), dtype=self.dtype) + result = np.zeros((self.codomain.dimension, self.domain.dimension), dtype=self.dtype) else: if out is not None: raise Exception("If is_sparse is True then out must be set to None.") @@ -97,10 +97,10 @@ def toarray_struphy(self, out=None, is_sparse=False, format="csr"): ndim = [sp.ndim for sp in self.domain.spaces] # First each rank is going to need to know the starts and ends of all other ranks - startsarr = xp.array([starts[i][j] for i in range(nsp) for j in range(ndim[i])], dtype=int) + startsarr = np.array([starts[i][j] for i in range(nsp) for j in range(ndim[i])], dtype=int) # Create an array to store gathered data from all ranks - allstarts = xp.empty(size * len(startsarr), dtype=int) + allstarts = np.empty(size * len(startsarr), dtype=int) # Use Allgather to gather 'starts' from all ranks into 'allstarts' if comm is None or isinstance(comm, MockComm): @@ -111,9 +111,9 @@ def toarray_struphy(self, out=None, is_sparse=False, format="csr"): # Reshape 'allstarts' to have 9 columns and 'size' rows allstarts = allstarts.reshape((size, len(startsarr))) - endsarr = xp.array([ends[i][j] for i in range(nsp) for j in range(ndim[i])], dtype=int) + endsarr = np.array([ends[i][j] for i in range(nsp) for j in range(ndim[i])], dtype=int) # Create an array to store gathered data from all ranks - allends = xp.empty(size * len(endsarr), dtype=int) + allends = np.empty(size * len(endsarr), dtype=int) # Use Allgather to gather 'ends' from all ranks into 'allends' if comm is None or isinstance(comm, MockComm): @@ -136,7 +136,7 @@ def toarray_struphy(self, out=None, is_sparse=False, format="csr"): itterables = [] for i in range(ndim[h]): itterables.append( - range(allstarts[currentrank][i + npredim], allends[currentrank][i + npredim] + 1), + range(allstarts[currentrank][i + npredim], allends[currentrank][i + npredim] + 1) ) # We iterate over all the entries that belong to rank number 
                for i in itertools.product(*itterables):
@@ -148,13 +148,13 @@
                    self.dot(v, out=tmp2)
                    # Compute to which column this iteration belongs
                    col = spoint
-                    col += xp.ravel_multi_index(i, npts[h])
+                    col += np.ravel_multi_index(i, npts[h])
                    if not is_sparse:
                        result[:, col] = tmp2.toarray()
                    else:
                        aux = tmp2.toarray()
                        # We now need to know which entries of tmp2 are non-zero and store them in our data list
-                        for l in xp.where(aux != 0)[0]:
+                        for l in np.where(aux != 0)[0]:
                            data.append(aux[l])
                            colarr.append(col)
                            row.append(l)
@@ -179,9 +179,9 @@
            ndim = self.domain.ndim

            # First each rank is going to need to know the starts and ends of all other ranks
-            startsarr = xp.array([starts[j] for j in range(ndim)], dtype=int)
+            startsarr = np.array([starts[j] for j in range(ndim)], dtype=int)
            # Create an array to store gathered data from all ranks
-            allstarts = xp.empty(size * len(startsarr), dtype=int)
+            allstarts = np.empty(size * len(startsarr), dtype=int)

            # Use Allgather to gather 'starts' from all ranks into 'allstarts'
            if comm is None or isinstance(comm, MockComm):
@@ -192,9 +192,9 @@
            # Reshape 'allstarts' to have 3 columns and 'size' rows
            allstarts = allstarts.reshape((size, len(startsarr)))

-            endsarr = xp.array([ends[j] for j in range(ndim)], dtype=int)
+            endsarr = np.array([ends[j] for j in range(ndim)], dtype=int)
            # Create an array to store gathered data from all ranks
-            allends = xp.empty(size * len(endsarr), dtype=int)
+            allends = np.empty(size * len(endsarr), dtype=int)

            # Use Allgather to gather 'ends' from all ranks into 'allends'
            if comm is None or isinstance(comm, MockComm):
@@ -219,13 +219,13 @@
                # Compute dot product with the linear operator.
                self.dot(v, out=tmp2)
                # Compute to which column this iteration belongs
-                col = xp.ravel_multi_index(i, npts)
+                col = np.ravel_multi_index(i, npts)
                if not is_sparse:
                    result[:, col] = tmp2.toarray()
                else:
                    aux = tmp2.toarray()
                    # We now need to know which entries of tmp2 are non-zero and store them in our data list
-                    for l in xp.where(aux != 0)[0]:
+                    for l in np.where(aux != 0)[0]:
                        data.append(aux[l])
                        colarr.append(col)
                        row.append(l)
@@ -237,7 +237,7 @@
            # I cannot conceive any situation where this error should be thrown, but I put it here just in case something unexpected happens.
            raise Exception("Function toarray_struphy() only supports Stencil Vectors or Block Vectors.")

        if not is_sparse:
            # Use Allreduce to perform addition reduction and give one copy of the result to all ranks.
            if comm is None or isinstance(comm, MockComm):
                out[:] = result
@@ -293,7 +293,7 @@
            return sparse.csr_matrix((all_data, (all_rows, all_cols)), shape=(numrows, numcols)).todia()
        else:
            raise Exception(
-                "The selected sparse matrix format must be one of the following : csr, csc, bsr, lil, dok, coo or dia.",
+                "The selected sparse matrix format must be one of the following : csr, csc, bsr, lil, dok, coo or dia."
            )


@@ -309,7 +309,7 @@ class BoundaryOperator(LinOpWithTransp):

    space_id : str
        Symbolic space ID of vector_space (H1, Hcurl, Hdiv, L2 or H1vec).
-    dirichlet_bc : tuple[tuple[bool]]
+    dirichlet_bc : list[list[bool]]
        Whether to apply homogeneous Dirichlet boundary conditions (at left or right boundary in each direction).
    """
@@ -324,7 +324,7 @@ def __init__(self, vector_space, space_id, dirichlet_bc):
        self._space_id = space_id
        self._bc = dirichlet_bc

-        assert isinstance(dirichlet_bc, tuple)
+        assert isinstance(dirichlet_bc, list)
        assert len(dirichlet_bc) == 3

        # number of non-zero elements in poloidal/toroidal direction
diff --git a/src/struphy/feec/local_projectors_kernels.py b/src/struphy/feec/local_projectors_kernels.py
index 706b3f78e..09b4c182f 100644
--- a/src/struphy/feec/local_projectors_kernels.py
+++ b/src/struphy/feec/local_projectors_kernels.py
@@ -63,7 +63,7 @@ def get_local_problem_size(periodic: "bool[:]", p: "int[:]", IoH: "bool[:]"):

    for h in range(3):
        # Interpolation
        if not IoH[h]:
            lenj[h] = 2 * p[h] - 1
        # Histopolation
        else:
@@ -168,10 +168,7 @@ def get_dofs_local_1_form_ec_component_weighted(

@stack_array("shp")
def get_dofs_local_1_form_ec_component(
-    args_solve: LocalProjectorsArguments,
-    f3: "float[:,:,:]",
-    f_eval_aux: "float[:,:,:]",
-    c: int,
+    args_solve: LocalProjectorsArguments, f3: "float[:,:,:]", f_eval_aux: "float[:,:,:]", c: int
):
    """Kernel for evaluating the degrees of freedom for the c-th component of 1-forms. This function is for local commuting projectors.
@@ -223,10 +220,7 @@

@stack_array("shp")
def get_dofs_local_2_form_ec_component(
-    args_solve: LocalProjectorsArguments,
-    fc: "float[:,:,:]",
-    f_eval_aux: "float[:,:,:]",
-    c: int,
+    args_solve: LocalProjectorsArguments, fc: "float[:,:,:]", f_eval_aux: "float[:,:,:]", c: int
):
    """Kernel for evaluating the degrees of freedom for the c-th component of 2-forms. This function is for local commuting projectors.
@@ -734,7 +728,7 @@ def solve_local_main_loop_weighted(
                if counteri0 >= rows0[i00] and counteri0 <= rowe0[i00]:
                    compute0 = True
                    break
            if compute0:
                counteri1 = 0
                for i1 in range(args_solve.starts[1], args_solve.ends[1] + 1):
                    # This bool variable tells us if this row has a non-zero FE coefficient, based on the current basis function we are using on our projection
@@ -744,7 +738,7 @@
                    if counteri1 >= rows1[i11] and counteri1 <= rowe1[i11]:
                        compute1 = True
                        break
                if compute1:
                    counteri2 = 0
                    for i2 in range(args_solve.starts[2], args_solve.ends[2] + 1):
                        # This bool variable tells us if this row has a non-zero FE coefficient, based on the current basis function we are using on our projection
@@ -754,7 +748,7 @@
                        if counteri2 >= rows2[i22] and counteri2 <= rowe2[i22]:
                            compute2 = True
                            break
                    if compute2:
                        L123 = 0.0
                        startj1, endj1 = select_quasi_points(
                            i0,
@@ -850,7 +844,7 @@ def find_relative_col(col: int, row: int, Nbasis: int, periodic: bool):

        The relative column position of col with respect to the current row of the StencilMatrix.
""" - if not periodic: + if periodic == False: relativecol = col - row # In the periodic case we must account for the possible looping of the basis functions when computing the relative row postion else: @@ -944,7 +938,7 @@ def assemble_basis_projection_operator_local( compute0 = True break relativecol0 = find_relative_col(col[0], row0, VNbasis[0], periodic[0]) - if relativecol0 >= -p[0] and relativecol0 <= p[0] and compute0: + if relativecol0 >= -p[0] and relativecol0 <= p[0] and compute0 == True: count1 = 0 for row1 in range(starts[1], ends[1] + 1): # This bool variable tell us if this row has a non-zero FE coefficient, based on the current basis function we are using on our projection @@ -955,7 +949,7 @@ def assemble_basis_projection_operator_local( compute1 = True break relativecol1 = find_relative_col(col[1], row1, VNbasis[1], periodic[1]) - if relativecol1 >= -p[1] and relativecol1 <= p[1] and compute1: + if relativecol1 >= -p[1] and relativecol1 <= p[1] and compute1 == True: count2 = 0 for row2 in range(starts[2], ends[2] + 1): # This bool variable tell us if this row has a non-zero FE coefficient, based on the current basis function we are using on our projection @@ -966,7 +960,7 @@ def assemble_basis_projection_operator_local( compute2 = True break relativecol2 = find_relative_col(col[2], row2, VNbasis[2], periodic[2]) - if relativecol2 >= -p[2] and relativecol2 <= p[2] and compute2: + if relativecol2 >= -p[2] and relativecol2 <= p[2] and compute2 == True: mat[ count0 + pds[0], count1 + pds[1], @@ -1002,7 +996,7 @@ def are_quadrature_points_zero(aux: "int[:]", p: int, basis: "float[:]"): if basis[in_start + ii] != 0.0: all_zero = False break - if all_zero: + if all_zero == True: aux[i] = 0 @@ -1043,15 +1037,7 @@ def get_rows_periodic(starts: int, ends: int, modl: int, modr: int, Nbasis: int, def get_rows( - col: int, - starts: int, - ends: int, - p: int, - Nbasis: int, - periodic: bool, - IoH: bool, - BoD: bool, - aux: "int[:]", + col: int, starts: int, ends: int, p: int, Nbasis: int, periodic: bool, IoH: bool, BoD: bool, aux: "int[:]" ): """Kernel for getting the list of rows that are non-zero for the current BasisProjectionLocal column, within the start and end indices of the current MPI rank. @@ -1085,33 +1071,33 @@ def get_rows( Array where we put a one if the current row could have a non-zero FE coefficient for the column given by col. 
""" # Periodic boundary conditions - if periodic: + if periodic == True: # Histopolation - if IoH: + if IoH == True: # D-splines - if BoD: + if BoD == True: get_rows_periodic(starts, ends, -p + 1, p, Nbasis, col, aux) # B-splines - if not BoD: + if BoD == False: get_rows_periodic(starts, ends, -p + 1, p + 1, Nbasis, col, aux) # Interpolation - if not IoH: + if IoH == False: # D-splines - if BoD: + if BoD == True: # Special case p = 1 if p == 1: get_rows_periodic(starts, ends, -1, 1, Nbasis, col, aux) if p != 1: get_rows_periodic(starts, ends, -p + 1, p - 1, Nbasis, col, aux) # B-splines - if not BoD: + if BoD == False: get_rows_periodic(starts, ends, -p + 1, p, Nbasis, col, aux) # Clamped boundary conditions - if not periodic: + if periodic == False: # Histopolation - if IoH: + if IoH == True: # D-splines - if BoD: + if BoD == True: count = 0 for row in range(starts, ends + 1): if row >= 0 and row <= (p - 2) and col >= 0 and col <= row + p - 1: @@ -1124,7 +1110,7 @@ def get_rows( aux[count] = 1 count += 1 # B-splines - if not BoD: + if BoD == False: count = 0 for row in range(starts, ends + 1): if row >= 0 and row <= (p - 2) and col >= 0 and col <= (row + p): @@ -1135,9 +1121,9 @@ def get_rows( aux[count] = 1 count += 1 # Interpolation - if not IoH: + if IoH == False: # D-splines - if BoD: + if BoD == True: count = 0 for row in range(starts, ends + 1): if row == 0 and col <= (p - 1): @@ -1152,7 +1138,7 @@ def get_rows( aux[count] = 1 count += 1 # B-splines - if not BoD: + if BoD == False: count = 0 for row in range(starts, ends + 1): if row == 0 and col <= p: diff --git a/src/struphy/feec/mass.py b/src/struphy/feec/mass.py index 5964f5f7c..a86e02be8 100644 --- a/src/struphy/feec/mass.py +++ b/src/struphy/feec/mass.py @@ -1,14 +1,11 @@ import inspect -from copy import deepcopy -import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.fem.tensor import TensorFemSpace from psydac.fem.vector import VectorFemSpace from psydac.linalg.basic import IdentityOperator, LinearOperator, Vector from psydac.linalg.block import BlockLinearOperator, BlockVector -from psydac.linalg.solvers import inverse from psydac.linalg.stencil import StencilDiagonalMatrix, StencilMatrix, StencilVector from struphy.feec import mass_kernels @@ -17,6 +14,7 @@ from struphy.feec.utilities import RotationMatrix from struphy.geometry.base import Domain from struphy.polar.linear_operators import PolarExtractionOperator +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -449,7 +447,7 @@ def M2B_div0(self): self.weights[self.selected_weight].a1_1, self.weights[self.selected_weight].a1_2, self.weights[self.selected_weight].a1_3, - ], + ] ) tmp_b2 = self.derham.curl.dot(a_eq) @@ -555,7 +553,7 @@ def M2Bn(self): self.weights[self.selected_weight].a1_1, self.weights[self.selected_weight].a1_2, self.weights[self.selected_weight].a1_3, - ], + ] ) tmp_b2 = self.derham.curl.dot(a_eq) @@ -731,12 +729,6 @@ def M1gyro(self): return self._M1gyro - @property - def WMM(self): - if not hasattr(self, "_WMM"): - self._WMM = self.H1vecMassMatrix_density(self.derham, self, self.domain) - return self._WMM - ####################################### # Wrapper around WeightedMassOperator # ####################################### @@ -777,7 +769,7 @@ def create_weighted_mass( 1. ``str`` : for square block matrices (V=W), a symmetry can be set in order to accelerate the assembly process. 
Possible strings are ``symm`` (symmetric), ``asym`` (anti-symmetric) and ``diag`` (diagonal). 2. ``None`` : all blocks are allocated, disregarding zero-blocks or any symmetry. 3. ``1D list`` : 1d list consisting of either a) strings or b) matrices (3x3 callables or 3x3 list) and can be mixed. Predefined names are ``G``, ``Ginv``, ``DFinv``, ``sqrt_g``. Access them using strings in the 1d list: ``weights=['<name>']``. Possible choices for key-value pairs in **weights** are, at the moment: ``eq_mhd``: :class:`~struphy.fields_background.base.MHDequilibrium`. To access them, use for ``<name>`` the string ``eq_<name>``, where ``<name>`` can be found in the just mentioned base classes for MHD equilibria. By default, all scalars are multiplied. For division of scalars use ``1/<name>``. - 4. ``2D list`` : 2d list with the same number of rows/columns as the number of components of the domain/codomain spaces. The entries can be either a) callables or b) xp.ndarrays representing the weights at the quadrature points. If an entry is zero or ``None``, the corresponding block is set to ``None`` to accelerate the dot product. + 4. ``2D list`` : 2d list with the same number of rows/columns as the number of components of the domain/codomain spaces. The entries can be either a) callables or b) np.ndarrays representing the weights at the quadrature points. If an entry is zero or ``None``, the corresponding block is set to ``None`` to accelerate the dot product. assemble: bool Whether to assemble the weighted mass matrix, i.e. compute the integrals with @@ -905,7 +897,7 @@ def DFinvT(e1, e2, e3): if weights_rank2: # if matrix exists fun = [] - if listinput and len(weights_rank2) == 1: + if listinput == True and len(weights_rank2) == 1: for m in range(3): fun += [[]] for n in range(3): @@ -918,11 +910,7 @@ def DFinvT(e1, e2, e3): for n in range(3): fun[-1] += [ lambda e1, e2, e3, m=m, n=n: self._matrix_operate(e1, e2, e3, *weights_rank2)[ - :, - :, - :, - m, - n, + :, :, :, m, n ], ] # Scalar operations second @@ -949,14 +937,14 @@ def DFinvT(e1, e2, e3): fun = [ [ lambda e1, e2, e3: 1.0 / weights_rank0[0](e1, e2, e3), - ], + ] ] for f2, op in zip(weights_rank0[1:], operations[1:]): fun = [ [ lambda e1, e2, e3, f=fun[0][0], op=op, f2=f2: self._operate(f, f2, op, e1, e2, e3), - ], + ] ] V_id = self.derham.space_to_form[V_id] @@ -990,11 +978,11 @@ def _get_range_rank(self, func): else: dummy_eta = (0.0, 0.0, 0.0) val = func(*dummy_eta) - assert isinstance(val, xp.ndarray) + assert isinstance(val, np.ndarray) out = len(val.shape) - 3 else: if isinstance(func, list): - if isinstance(func[0], xp.ndarray): + if isinstance(func[0], np.ndarray): out = 2 else: out = len(func) - 1 @@ -1037,92 +1025,6 @@ def _operate(self, f1, f2, op, e1, e2, e3): return out - ####################################### - # Aux classes (to be removed in TODO) # - ####################################### - class H1vecMassMatrix_density: - """Wrapper around a Weighted mass operator from H1vec to H1vec whose weights are given by a 3 form""" - - def __init__(self, derham, mass_ops, domain): - self._massop = mass_ops.create_weighted_mass("H1vec", "H1vec") - self.field = derham.create_spline_function("field", "L2") - - integration_grid = [grid_1d.flatten() for grid_1d in derham.quad_grid_pts["0"]] - - self.integration_grid_spans, self.integration_grid_bn, self.integration_grid_bd = ( - derham.prepare_eval_tp_fixed( - integration_grid, - ) - ) - - grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._f_values = xp.zeros(grid_shape, dtype=float) - - metric = 
domain.metric(*integration_grid) - self._mass_metric_term = deepcopy(metric) - self._full_term_mass = deepcopy(metric) - - @property - def massop( - self, - ): - """The WeightedMassOperator""" - return self._massop - - @property - def inv( - self, - ): - """The inverse WeightedMassOperator""" - if not hasattr(self, "_inv"): - self._create_inv() - return self._inv - - def update_weight(self, coeffs): - """Update the weighted mass matrix operator""" - - self.field.vector = coeffs - f_values = self.field.eval_tp_fixed_loc( - self.integration_grid_spans, - self.integration_grid_bd, - out=self._f_values, - ) - for i in range(3): - for j in range(3): - self._full_term_mass[i, j] = f_values * self._mass_metric_term[i, j] - - self._massop.assemble( - [ - [self._full_term_mass[0, 0], self._full_term_mass[0, 1], self._full_term_mass[0, 2]], - [ - self._full_term_mass[1, 0], - self._full_term_mass[ - 1, - 1, - ], - self._full_term_mass[1, 2], - ], - [self._full_term_mass[2, 0], self._full_term_mass[2, 1], self._full_term_mass[2, 2]], - ], - verbose=False, - ) - - if hasattr(self, "_inv") and self.inv._options["pc"] is not None: - self.inv._options["pc"].update_mass_operator(self.massop) - - def _create_inv(self, type="pcg", tol=1e-16, maxiter=500, verbose=False): - """Inverse the weighted mass matrix, preconditioner must be set outside - via self._inv._options['pc'] = ...""" - self._inv = inverse( - self.massop, - type, - pc=None, - tol=tol, - maxiter=maxiter, - verbose=verbose, - recycle=True, - ) - class WeightedMassOperatorsOldForTesting: r""" @@ -1630,7 +1532,7 @@ def M2B_div0(self): self.weights[self.selected_weight].a1_1, self.weights[self.selected_weight].a1_2, self.weights[self.selected_weight].a1_3, - ], + ] ) tmp_b2 = self.derham.curl.dot(a_eq) @@ -1744,7 +1646,7 @@ def M2Bn(self): self.weights[self.selected_weight].a1_1, self.weights[self.selected_weight].a1_2, self.weights[self.selected_weight].a1_3, - ], + ] ) tmp_b2 = self.derham.curl.dot(a_eq) @@ -1865,11 +1767,7 @@ def M1perp(self): for n in range(3): fun[-1] += [ lambda e1, e2, e3, m=m, n=n: (self.DFinv(e1, e2, e3) @ self.D @ self.DFinv(e1, e2, e3))[ - :, - :, - :, - m, - n, + :, :, :, m, n ] * self.sqrt_g( e1, @@ -1906,7 +1804,7 @@ def M0ad(self): e3, ) * self.sqrt_g(e1, e2, e3), - ], + ] ] self._M0ad = self._assemble_weighted_mass( @@ -2091,7 +1989,7 @@ class WeightedMassOperator(LinOpWithTransp): 1. ``None`` : all blocks are allocated, disregarding zero-blocks or any symmetry. 2. ``str`` : for square block matrices (V=W), a symmetry can be set in order to accelerate the assembly process. Possible strings are ``symm`` (symmetric), ``asym`` (anti-symmetric) and ``diag`` (diagonal). - 3. ``list`` : 2d list with the same number of rows/columns as the number of components of the domain/codomain spaces. The entries can be either a) callables or b) xp.ndarrays representing the weights at the quadrature points. If an entry is zero or ``None``, the corresponding block is set to ``None`` to accelerate the dot product. + 3. ``list`` : 2d list with the same number of rows/columns as the number of components of the domain/codomain spaces. The entries can be either a) callables or b) np.ndarrays representing the weights at the quadrature points. If an entry is zero or ``None``, the corresponding block is set to ``None`` to accelerate the dot product. transposed : bool Whether to assemble the transposed operator. 
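As a reading aid for the ``weights`` variants documented above, here is a minimal sketch of the 2d-list option for a 3-component space; only the weight layout is shown, the operator constructor itself is omitted.

from struphy.utils.arrays import xp as np

def w(e1, e2, e3):
    # scalar weight, broadcast over the quadrature meshgrid arrays
    return 1.0 + 0.0 * e1

# diagonal 3x3 layout: blocks set to None are skipped in the dot product
weights = [
    [w, None, None],
    [None, w, None],
    [None, None, w],
]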
@@ -2367,22 +2265,21 @@ def __init__( pts = [ quad_grid[nquad].points.flatten() for quad_grid, nquad in zip( - self.derham.get_quad_grids(wspace, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(wspace, nquads=self.nquads), self.nquads ) ] if callable(weights_info[a][b]): - PTS = xp.meshgrid(*pts, indexing="ij") + PTS = np.meshgrid(*pts, indexing="ij") mat_w = weights_info[a][b](*PTS).copy() - elif isinstance(weights_info[a][b], xp.ndarray): + elif isinstance(weights_info[a][b], np.ndarray): mat_w = weights_info[a][b] assert mat_w.shape == tuple( [pt.size for pt in pts], ) - if xp.any(xp.abs(mat_w) > 1e-14): + if np.any(np.abs(mat_w) > 1e-14): if self._matrix_free: blocks[-1] += [ StencilMatrixFreeMassOperator( @@ -2482,7 +2379,7 @@ def __init__( getattr( mass_kernels, "kernel_" + str(self._V.ldim) + "d_mat", - ), + ) ) @property @@ -2518,10 +2415,10 @@ def tosparse(self): if all(op is None for op in (self._W_extraction_op, self._V_extraction_op)): for bl in self._V_boundary_op.bc: for bc in bl: - assert not bc, print(".tosparse() only works without boundary conditions at the moment") + assert bc == False, print(".tosparse() only works without boundary conditions at the moment") for bl in self._W_boundary_op.bc: for bc in bl: - assert not bc, print(".tosparse() only works without boundary conditions at the moment") + assert bc == False, print(".tosparse() only works without boundary conditions at the moment") return self._mat.tosparse() elif all(isinstance(op, IdentityOperator) for op in (self._W_extraction_op, self._V_extraction_op)): @@ -2534,10 +2431,10 @@ def toarray(self): if all(op is None for op in (self._W_extraction_op, self._V_extraction_op)): for bl in self._V_boundary_op.bc: for bc in bl: - assert not bc, print(".toarray() only works without boundary conditions at the moment") + assert bc == False, print(".toarray() only works without boundary conditions at the moment") for bl in self._W_boundary_op.bc: for bc in bl: - assert not bc, print(".toarray() only works without boundary conditions at the moment") + assert bc == False, print(".toarray() only works without boundary conditions at the moment") return self._mat.toarray() elif all(isinstance(op, IdentityOperator) for op in (self._W_extraction_op, self._V_extraction_op)): @@ -2698,7 +2595,7 @@ def assemble(self, weights=None, clear=True, verbose=True): Parameters ---------- weights : list | NoneType - Weight function(s) (callables or xp.ndarrays) in a 2d list of shape corresponding to + Weight function(s) (callables or np.ndarrays) in a 2d list of shape corresponding to number of components of domain/codomain. If ``weights=None``, the weight is taken from the given weights in the instantiation of the object, else it will be overridden.
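A hedged usage sketch of the two weight types accepted by ``assemble`` according to the docstring above; ``op`` is assumed to be an existing scalar-valued WeightedMassOperator and ``pts`` its 1d quadrature point arrays, so the actual calls are left commented.

from struphy.utils.arrays import xp as np

def rho(e1, e2, e3):
    # weight evaluated on meshgrid arrays of quadrature points
    return 1.0 + 0.5 * np.cos(2.0 * np.pi * e1)

pts = [np.linspace(0.0, 1.0, 4) for _ in range(3)]  # stand-in quadrature points
PTS = np.meshgrid(*pts, indexing="ij")
mat_w = rho(*PTS)  # shape == tuple(pt.size for pt in pts), here (4, 4, 4)

# a) callable, evaluated internally:  op.assemble(weights=[[rho]], clear=True)
# b) precomputed ndarray:             op.assemble(weights=[[mat_w]], clear=True)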
@@ -2721,7 +2618,7 @@ def assemble(self, weights=None, clear=True, verbose=True): if weight is not None: assert callable(weight) or isinstance( weight, - xp.ndarray, + np.ndarray, ) self._mat[a, b].weights = weight @@ -2788,8 +2685,7 @@ def assemble(self, weights=None, clear=True, verbose=True): codomain_spans = [ quad_grid[nquad].spans for quad_grid, nquad in zip( - self.derham.get_quad_grids(codomain_space, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(codomain_space, nquads=self.nquads), self.nquads ) ] @@ -2802,8 +2698,7 @@ def assemble(self, weights=None, clear=True, verbose=True): pts = [ quad_grid[nquad].points.flatten() for quad_grid, nquad in zip( - self.derham.get_quad_grids(codomain_space, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(codomain_space, nquads=self.nquads), self.nquads ) ] wts = [ @@ -2818,8 +2713,7 @@ def assemble(self, weights=None, clear=True, verbose=True): codomain_basis = [ quad_grid[nquad].basis for quad_grid, nquad in zip( - self.derham.get_quad_grids(codomain_space, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(codomain_space, nquads=self.nquads), self.nquads ) ] @@ -2834,13 +2728,13 @@ def assemble(self, weights=None, clear=True, verbose=True): # evaluate weight at quadrature points if callable(loc_weight): - PTS = xp.meshgrid(*pts, indexing="ij") + PTS = np.meshgrid(*pts, indexing="ij") mat_w = loc_weight(*PTS).copy() - elif isinstance(loc_weight, xp.ndarray): + elif isinstance(loc_weight, np.ndarray): mat_w = loc_weight elif loc_weight is not None: raise TypeError( - "weights must be callable or xp.ndarray or None but is {}".format( + "weights must be callable or np.ndarray or None but is {}".format( type(self._weights[a][b]), ), ) @@ -2848,8 +2742,8 @@ def assemble(self, weights=None, clear=True, verbose=True): if loc_weight is not None: assert mat_w.shape == tuple([pt.size for pt in pts]) - not_weight_zero = xp.array( - int(loc_weight is not None and xp.any(xp.abs(mat_w) > 1e-14)), + not_weight_zero = np.array( + int(loc_weight is not None and np.any(np.abs(mat_w) > 1e-14)), ) if self._mpi_comm is not None: self._mpi_comm.Allreduce( @@ -2862,8 +2756,7 @@ def assemble(self, weights=None, clear=True, verbose=True): domain_basis = [ quad_grid[nquad].basis for quad_grid, nquad in zip( - self.derham.get_quad_grids(domain_space, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(domain_space, nquads=self.nquads), self.nquads ) ] @@ -2874,7 +2767,7 @@ def assemble(self, weights=None, clear=True, verbose=True): mat = self._mat if loc_weight is None: # in case it's none we still need to have zeros weights to call the kernel - mat_w = xp.zeros( + mat_w = np.zeros( tuple([pt.size for pt in pts]), ) else: @@ -3010,12 +2903,12 @@ def eval_quad(W, coeffs, out=None): coeffs : StencilVector | BlockVector The coefficient vector corresponding to the FEM field. Ghost regions must be up-to-date! - out : xp.ndarray | list/tuple of xp.ndarrays, optional + out : np.ndarray | list/tuple of np.ndarrays, optional If given, the result will be written into these arrays in-place. Number of outs must be compatible with number of components of FEM field. Returns ------- - out : xp.ndarray | list/tuple of xp.ndarrays + out : np.ndarray | list/tuple of np.ndarrays The values of the FEM field at the quadrature points. 
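A short, hypothetical usage sketch for ``eval_quad`` as documented above; ``mass_op`` (a WeightedMassOperator), ``W`` (its codomain FEM space) and ``coeffs`` are assumed to exist and are not constructed here.

# ghost regions must be up-to-date before evaluating at quadrature points
coeffs.update_ghost_regions()

vals = mass_op.eval_quad(W, coeffs)            # allocates output arrays
vals = mass_op.eval_quad(W, coeffs, out=vals)  # reuses the buffer in-place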
""" @@ -3034,7 +2927,7 @@ def eval_quad(W, coeffs, out=None): out = () if isinstance(W, TensorFemSpace): out += ( - xp.zeros( + np.zeros( [ q_grid[nquad].points.size for q_grid, nquad in zip(self.derham.get_quad_grids(W, nquads=self.nquads), self.nquads) @@ -3045,12 +2938,11 @@ def eval_quad(W, coeffs, out=None): else: for space in W.spaces: out += ( - xp.zeros( + np.zeros( [ q_grid[nquad].points.size for q_grid, nquad in zip( - self.derham.get_quad_grids(space, nquads=self.nquads), - self.nquads, + self.derham.get_quad_grids(space, nquads=self.nquads), self.nquads ) ], dtype=float, @@ -3059,7 +2951,7 @@ def eval_quad(W, coeffs, out=None): else: if isinstance(W, TensorFemSpace): - assert isinstance(out, xp.ndarray) + assert isinstance(out, np.ndarray) out = (out,) else: assert isinstance(out, (list, tuple)) @@ -3164,18 +3056,18 @@ def __init__(self, derham, V, W, weights=None, nquads=None): getattr( mass_kernels, "kernel_" + str(self._V.ldim) + "d_matrixfree", - ), + ) ) self._diag_kernel = Pyccelkernel( getattr( mass_kernels, "kernel_" + str(self._V.ldim) + "d_diag", - ), + ) ) shape = tuple(e - s + 1 for s, e in zip(V.coeff_space.starts, V.coeff_space.ends)) - self._diag_tmp = xp.zeros((shape)) + self._diag_tmp = np.zeros((shape)) # knot span indices of elements of local domain self._codomain_spans = [ @@ -3259,11 +3151,7 @@ def toarray(self): def transpose(self, conjugate=False): return StencilMatrixFreeMassOperator( - self._derham, - self._codomain, - self._domain, - self._weights, - nquads=self._nquads, + self._derham, self._codomain, self._domain, self._weights, nquads=self._nquads ) @property @@ -3306,16 +3194,16 @@ def dot(self, v, out=None): # evaluate weight at quadrature points if callable(self._weights): - PTS = xp.meshgrid(*self._pts, indexing="ij") + PTS = np.meshgrid(*self._pts, indexing="ij") mat_w = self._weights(*PTS).copy() - elif isinstance(self._weights, xp.ndarray): + elif isinstance(self._weights, np.ndarray): mat_w = self._weights if self._weights is not None: assert mat_w.shape == tuple([pt.size for pt in self._pts]) # call kernel (if mat_w is not zero) by calling the appropriate kernel (1d, 2d or 3d) - if xp.any(xp.abs(mat_w) > 1e-14): + if np.any(np.abs(mat_w) > 1e-14): self._dot_kernel( *self._codomain_spans, *self._domain_spans, @@ -3375,9 +3263,9 @@ def diagonal(self, inverse=False, sqrt=False, out=None): # evaluate weight at quadrature points if callable(self._weights): - PTS = xp.meshgrid(*self._pts, indexing="ij") + PTS = np.meshgrid(*self._pts, indexing="ij") mat_w = self._weights(*PTS).copy() - elif isinstance(self._weights, xp.ndarray): + elif isinstance(self._weights, np.ndarray): mat_w = self._weights diag = self._diag_tmp @@ -3397,12 +3285,12 @@ def diagonal(self, inverse=False, sqrt=False, out=None): # Calculate entries of StencilDiagonalMatrix if sqrt: - diag = xp.sqrt(diag) + diag = np.sqrt(diag) if inverse: - data = xp.divide(1, diag, out=data) + data = np.divide(1, diag, out=data) elif out: - xp.copyto(data, diag) + np.copyto(data, diag) else: data = diag.copy() diff --git a/src/struphy/feec/mass_kernels.py b/src/struphy/feec/mass_kernels.py index 7b4f09720..7e62b3248 100644 --- a/src/struphy/feec/mass_kernels.py +++ b/src/struphy/feec/mass_kernels.py @@ -341,9 +341,7 @@ def kernel_3d_mat( for iel2 in range(ne2): for iel3 in range(ne3): tmp_mat_fun[:, :, :] = mat_fun[ - iel1 * nq1 : (iel1 + 1) * nq1, - iel2 * nq2 : (iel2 + 1) * nq2, - iel3 * nq3 : (iel3 + 1) * nq3, + iel1 * nq1 : (iel1 + 1) * nq1, iel2 * nq2 : (iel2 + 1) * nq2, iel3 * nq3 : (iel3 
+ 1) * nq3 ] tmp_w1[:] = w1[iel1, :] @@ -602,9 +600,7 @@ def kernel_3d_matrixfree( for iel2 in range(ne2): for iel3 in range(ne3): tmp_mat_fun[:, :, :] = mat_fun[ - iel1 * nq1 : (iel1 + 1) * nq1, - iel2 * nq2 : (iel2 + 1) * nq2, - iel3 * nq3 : (iel3 + 1) * nq3, + iel1 * nq1 : (iel1 + 1) * nq1, iel2 * nq2 : (iel2 + 1) * nq2, iel3 * nq3 : (iel3 + 1) * nq3 ] tmp_w1[:] = w1[iel1, :] @@ -717,9 +713,7 @@ def kernel_3d_diag( for iel2 in range(ne2): for iel3 in range(ne3): tmp_mat_fun[:, :, :] = mat_fun[ - iel1 * nq1 : (iel1 + 1) * nq1, - iel2 * nq2 : (iel2 + 1) * nq2, - iel3 * nq3 : (iel3 + 1) * nq3, + iel1 * nq1 : (iel1 + 1) * nq1, iel2 * nq2 : (iel2 + 1) * nq2, iel3 * nq3 : (iel3 + 1) * nq3 ] tmp_w1[:] = w1[iel1, :] diff --git a/src/struphy/feec/preconditioner.py b/src/struphy/feec/preconditioner.py index dfa00df4c..b3a8744eb 100644 --- a/src/struphy/feec/preconditioner.py +++ b/src/struphy/feec/preconditioner.py @@ -1,4 +1,3 @@ -import cunumpy as xp from psydac.api.essential_bc import apply_essential_bc_stencil from psydac.ddm.cart import CartDecomposition, DomainDecomposition from psydac.fem.tensor import TensorFemSpace @@ -12,6 +11,7 @@ from struphy.feec.linear_operators import BoundaryOperator from struphy.feec.mass import WeightedMassOperator +from struphy.utils.arrays import xp as np class MassMatrixPreconditioner(LinearOperator): @@ -94,12 +94,12 @@ def fun(e): s = e.shape[0] newshape = tuple([1 if i != d else s for i in range(n_dims)]) f = e.reshape(newshape) - return xp.atleast_1d( + return np.atleast_1d( loc_weights( - *[xp.array(xp.full_like(f, 0.5)) if i != d else xp.array(f) for i in range(n_dims)], + *[np.array(np.full_like(f, 0.5)) if i != d else np.array(f) for i in range(n_dims)], ).squeeze(), ) - elif isinstance(loc_weights, xp.ndarray): + elif isinstance(loc_weights, np.ndarray): s = loc_weights.shape if d == 0: fun = loc_weights[:, s[1] // 2, s[2] // 2] @@ -108,14 +108,14 @@ def fun(e): elif d == 2: fun = loc_weights[s[0] // 2, s[1] // 2, :] elif loc_weights is None: - fun = lambda e: xp.ones(e.size, dtype=float) + fun = lambda e: np.ones(e.size, dtype=float) else: raise TypeError( - "weights needs to be callable, xp.ndarray or None but is{}".format(type(loc_weights)), + "weights needs to be callable, np.ndarray or None but is{}".format(type(loc_weights)), ) fun = [[fun]] else: - fun = [[lambda e: xp.ones(e.size, dtype=float)]] + fun = [[lambda e: np.ones(e.size, dtype=float)]] # get 1D FEM space (serial, not distributed) and quadrature order femspace_1d = femspaces[c].spaces[d] @@ -207,7 +207,7 @@ def fun(e): M_local = StencilMatrix(V_local, V_local) - row_indices, col_indices = xp.nonzero(M_arr) + row_indices, col_indices = np.nonzero(M_arr) for row_i, col_i in zip(row_indices, col_indices): # only consider row indices on process @@ -220,7 +220,7 @@ def fun(e): ] = M_arr[row_i, col_i] # check if stencil matrix was built correctly - assert xp.allclose(M_local.toarray()[s : e + 1], M_arr[s : e + 1]) + assert np.allclose(M_local.toarray()[s : e + 1], M_arr[s : e + 1]) matrixcells += [M_local.copy()] # ======================================================================================================= @@ -318,6 +318,11 @@ def solver(self): """KroneckerLinearSolver or BlockDiagonalSolver for exactly inverting the approximate mass matrix self.matrix.""" return self._solver + @property + def domain(self): + """The domain of the linear operator - an element of Vectorspace""" + return self._space + @property def codomain(self): """The codomain of the linear operator - an element 
of Vectorspace""" @@ -482,7 +487,7 @@ def __init__(self, mass_operator, apply_bc=True): # loop over spatial directions for d in range(n_dims): - fun = [[lambda e: xp.ones(e.size, dtype=float)]] + fun = [[lambda e: np.ones(e.size, dtype=float)]] # get 1D FEM space (serial, not distributed) and quadrature order femspace_1d = femspaces[c].spaces[d] @@ -574,7 +579,7 @@ def __init__(self, mass_operator, apply_bc=True): M_local = StencilMatrix(V_local, V_local) - row_indices, col_indices = xp.nonzero(M_arr) + row_indices, col_indices = np.nonzero(M_arr) for row_i, col_i in zip(row_indices, col_indices): # only consider row indices on process @@ -587,7 +592,7 @@ def __init__(self, mass_operator, apply_bc=True): ] = M_arr[row_i, col_i] # check if stencil matrix was built correctly - assert xp.allclose(M_local.toarray()[s : e + 1], M_arr[s : e + 1]) + assert np.allclose(M_local.toarray()[s : e + 1], M_arr[s : e + 1]) matrixcells += [M_local.copy()] # ======================================================================================================= @@ -671,7 +676,7 @@ def __init__(self, mass_operator, apply_bc=True): # Need to assemble the logical mass matrix to extract the coefficients fun = [ - [lambda e1, e2, e3: xp.ones_like(e1, dtype=float) if i == j else None for j in range(3)] for i in range(3) + [lambda e1, e2, e3: np.ones_like(e1, dtype=float) if i == j else None for j in range(3)] for i in range(3) ] log_M = WeightedMassOperator( self._mass_operator.derham, @@ -699,6 +704,9 @@ def matrix(self): def solver(self): """KroneckerLinearSolver or BlockDiagonalSolver for exactly inverting the approximate mass matrix self.matrix.""" return self._solver + + @property + def domain(self): """The domain of the linear operator - an element of Vectorspace""" return self._space @@ -856,15 +864,15 @@ class FFTSolver(BandedSolver): Parameters ---------- - circmat : xp.ndarray + circmat : np.ndarray Generic circulant matrix. """ def __init__(self, circmat): - assert isinstance(circmat, xp.ndarray) + assert isinstance(circmat, np.ndarray) assert is_circulant(circmat) - self._space = xp.ndarray + self._space = np.ndarray self._column = circmat[:, 0] # -------------------------------------- @@ -881,13 +889,13 @@ def solve(self, rhs, out=None, transposed=False): Parameters ---------- - rhs : xp.ndarray + rhs : np.ndarray The right-hand sides to solve for. The vectors are assumed to be given in C-contiguous order, i.e. if multiple right-hand sides are given, then rhs is a two-dimensional array with the 0-th index denoting the number of the right-hand side, and the 1-st index denoting the element inside a right-hand side. - out : xp.ndarray, optional + out : np.ndarray, optional Output vector. If given, it has to have the same shape and datatype as rhs. transposed : bool @@ -905,9 +913,9 @@ def solve(self, rhs, out=None, transposed=False): try: out[:] = solve_circulant(self._column, rhs.T).T - except xp.linalg.LinAlgError: + except np.linalg.LinAlgError: eps = 1e-4 - print(f"Stabilizing singular preconditioning FFTSolver with {eps =}:") + print(f"Stabilizing singular preconditioning FFTSolver with {eps = }:") self._column[0] *= 1.0 + eps out[:] = solve_circulant(self._column, rhs.T).T @@ -929,13 +937,13 @@ def is_circulant(mat): Whether the matrix is circulant (=True) or not (=False). 
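For context on is_circulant and FFTSolver.solve above, a self-contained numerical sketch with illustrative values; plain numpy/scipy are used here, matching the solve_circulant routine the solver calls.

import numpy as np
from scipy.linalg import solve_circulant

# a circulant matrix is fully determined by its first column; each row is
# the previous row rotated one step to the right
C = np.array([[4.0, 1.0, 2.0],
              [2.0, 4.0, 1.0],
              [1.0, 2.0, 4.0]])

# the row test used by is_circulant: row i equals row i+1 rolled left by one
assert all(np.allclose(C[i, :], np.roll(C[i + 1, :], -1)) for i in range(2))

# solve C x = b via FFT from the first column alone, as FFTSolver.solve does
b = np.ones(3)
x = solve_circulant(C[:, 0], b)
assert np.allclose(C @ x, b)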
""" - assert isinstance(mat, xp.ndarray) + assert isinstance(mat, np.ndarray) assert len(mat.shape) == 2 assert mat.shape[0] == mat.shape[1] if mat.shape[0] > 1: for i in range(mat.shape[0] - 1): - circulant = xp.allclose(mat[i, :], xp.roll(mat[i + 1, :], -1)) + circulant = np.allclose(mat[i, :], np.roll(mat[i + 1, :], -1)) if not circulant: return circulant else: diff --git a/src/struphy/feec/projectors.py b/src/struphy/feec/projectors.py index be56cc722..1e9421c7e 100644 --- a/src/struphy/feec/projectors.py +++ b/src/struphy/feec/projectors.py @@ -1,4 +1,3 @@ -import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.feec.global_projectors import GlobalProjector @@ -38,6 +37,7 @@ from struphy.kernel_arguments.local_projectors_args_kernels import LocalProjectorsArguments from struphy.polar.basic import PolarVector from struphy.polar.linear_operators import PolarExtractionOperator +from struphy.utils.arrays import xp as np class CommutingProjector: @@ -80,11 +80,7 @@ class CommutingProjector: """ def __init__( - self, - projector_tensor: GlobalProjector, - dofs_extraction_op=None, - base_extraction_op=None, - boundary_op=None, + self, projector_tensor: GlobalProjector, dofs_extraction_op=None, base_extraction_op=None, boundary_op=None ): self._projector_tensor = projector_tensor @@ -580,24 +576,24 @@ class CommutingProjectorLocal: fem_space : FemSpace FEEC space into which the functions shall be projected. - pts : list of xp.array + pts : list of np.array 3-list (or nested 3-list[3-list] for BlockVectors) of 2D arrays with the quasi-interpolation points (or Gauss-Legendre quadrature points for histopolation). In format [spatial direction](B-spline index, point) for StencilVector spaces or [vector component][spatial direction](B-spline index, point) for BlockVector spaces. - wts : list of xp.array + wts : list of np.array 3D (4D for BlockVectors) list of 2D array with the Gauss-Legendre quadrature weights (full of ones for interpolation). In format [spatial direction](B-spline index, point) for StencilVector spaces or [vector component][spatial direction](B-spline index, point) for BlockVector spaces. - wij : list of xp.array + wij : list of np.array List of 2D arrays for the coefficients :math:`\omega_j^i` obtained by inverting the local collocation matrix. Use for obtaining the FE coefficients of a function via interpolation. In format [spatial direction](B-spline index, point). - whij : list of xp.array + whij : list of np.array List of 2D arrays for the coefficients :math:`\hat{\omega}_j^i` obtained from the :math:`\omega_j^i`. Use for obtaining the FE coefficients of a function via histopolation. In format [spatial direction](D-spline index, point). @@ -643,22 +639,22 @@ def __init__( # FE space of zero forms. That means that we have B-splines in all three spatial directions. Bspaces_1d = [fem_space_B.spaces] - self._B_nbasis = xp.array([space.nbasis for space in Bspaces_1d[0]]) + self._B_nbasis = np.array([space.nbasis for space in Bspaces_1d[0]]) # Degree of the B-spline space, not to be confused with the degrees given by fem_space.spaces.degree since depending on the situation it will give the D-spline degree instead - self._p = xp.zeros(3, dtype=int) + self._p = np.zeros(3, dtype=int) for i, space in enumerate(fem_space_B.spaces): self._p[i] = space.degree # FE space of three forms. That means that we have D-splines in all three spatial directions. 
Dspaces_1d = [fem_space_D.spaces] - D_nbasis = xp.array([space.nbasis for space in Dspaces_1d[0]]) + D_nbasis = np.array([space.nbasis for space in Dspaces_1d[0]]) self._periodic = [] for space in fem_space.spaces: self._periodic.append(space.periodic) - self._periodic = xp.array(self._periodic) + self._periodic = np.array(self._periodic) if isinstance(fem_space, TensorFemSpace): # The comm, rank and size are only necessary for debugging. In particular, for printing stuff @@ -671,21 +667,21 @@ def __init__( self._size = self._comm.Get_size() # We get the start and endpoint for each sublist in out - self._starts = xp.array(self.coeff_space.starts) - self._ends = xp.array(self.coeff_space.ends) + self._starts = np.array(self.coeff_space.starts) + self._ends = np.array(self.coeff_space.ends) # We compute the number of FE coefficients the current MPI rank is responsible for - self._loc_num_coeff = xp.array([self._ends[i] + 1 - self._starts[i] for i in range(3)], dtype=int) + self._loc_num_coeff = np.array([self._ends[i] + 1 - self._starts[i] for i in range(3)], dtype=int) # We get the pads - self._pds = xp.array(self.coeff_space.pads) + self._pds = np.array(self.coeff_space.pads) # We get the number of spaces we have self._nsp = 1 self._localpts = [] self._index_translation = [] self._inv_index_translation = [] - self._original_pts_size = xp.zeros((3), dtype=int) + self._original_pts_size = np.zeros((3), dtype=int) elif isinstance(fem_space, VectorFemSpace): # The comm, rank and size are only necessary for debugging. In particular, for printing stuff @@ -698,17 +694,17 @@ def __init__( self._size = self._comm.Get_size() # we collect all starts and ends in two big lists - self._starts = xp.array([vi.starts for vi in self.coeff_space.spaces]) - self._ends = xp.array([vi.ends for vi in self.coeff_space.spaces]) + self._starts = np.array([vi.starts for vi in self.coeff_space.spaces]) + self._ends = np.array([vi.ends for vi in self.coeff_space.spaces]) # We compute the number of FE coefficients the current MPI rank is responsible for - self._loc_num_coeff = xp.array( + self._loc_num_coeff = np.array( [[self._ends[h][i] + 1 - self._starts[h][i] for i in range(3)] for h in range(3)], dtype=int, ) # We collect the pads - self._pds = xp.array([vi.pads for vi in self.coeff_space.spaces]) + self._pds = np.array([vi.pads for vi in self.coeff_space.spaces]) # We get the number of space we have self._nsp = len(self.coeff_space.spaces) @@ -724,7 +720,7 @@ def __init__( self._localpts = [[], [], []] # Here we will store the global number of points for each block entry and for each spatial direction. 
- self._original_pts_size = [xp.zeros((3), dtype=int), xp.zeros((3), dtype=int), xp.zeros((3), dtype=int)] + self._original_pts_size = [np.zeros((3), dtype=int), np.zeros((3), dtype=int), np.zeros((3), dtype=int)] # This will be a list of three elements (the first one for the first block element, the second one for the second block element, ...), each one being a list with three arrays, # each array will contain the B-spline indices of the corresponding spatial direction for which this MPI rank has to store at least one non-zero FE coefficient for the storage of the @@ -744,33 +740,33 @@ def __init__( self._are_zero_block_B_or_D_splines = [[], [], []] # self._Basis_function_indices_agreggated_B[i][j] = -1 if the jth B-spline is not necessary for any of the three block entries in the ith spatial direction, otherwise it is 0 - self._Basis_function_indices_agreggated_B = [-1 * xp.ones(nbasis, dtype=int) for nbasis in self._B_nbasis] - self._Basis_function_indices_agreggated_D = [-1 * xp.ones(nbasis, dtype=int) for nbasis in D_nbasis] + self._Basis_function_indices_agreggated_B = [-1 * np.ones(nbasis, dtype=int) for nbasis in self._B_nbasis] + self._Basis_function_indices_agreggated_D = [-1 * np.ones(nbasis, dtype=int) for nbasis in D_nbasis] # List that will contain the LocalProjectorsArguments for each value of h = 0,1,2. self._solve_args = [] else: - raise TypeError(f"{fem_space =} is not of type FemSpace.") + raise TypeError(f"{fem_space = } is not of type FemSpace.") if isinstance(fem_space, TensorFemSpace): if space_id == "H1": # List of list that tell us for each spatial direction whether we have Interpolation or Histopolation. IoH_for_indices = ["I", "I", "I"] # Same list as before but with bools instead of chars - self._IoH = xp.array([False, False, False], dtype=bool) + self._IoH = np.array([False, False, False], dtype=bool) # We make a list with the interpolation/histopolation weights we need for each block and each direction. self._geo_weights = [self._wij[0], self._wij[1], self._wij[2]] elif space_id == "L2": IoH_for_indices = ["H", "H", "H"] - self._IoH = xp.array([True, True, True], dtype=bool) + self._IoH = np.array([True, True, True], dtype=bool) self._geo_weights = [self._whij[0], self._whij[1], self._whij[2]] lenj1, lenj2, lenj3 = get_local_problem_size(self._periodic, self._p, self._IoH) lenj = [lenj1, lenj2, lenj3] - self._shift = xp.array([0, 0, 0], dtype=int) + self._shift = np.array([0, 0, 0], dtype=int) compute_shifts(self._IoH, self._p, self._B_nbasis, self._shift) split_points( @@ -792,7 +788,7 @@ def __init__( ) # We want to build the meshgrid for the evaluation of the degrees of freedom so it only contains the evaluation points that each specific MPI rank is actually going to use. - self._meshgrid = xp.meshgrid( + self._meshgrid = np.meshgrid( *[pt for pt in self._localpts], indexing="ij", ) @@ -931,18 +927,18 @@ def __init__( ) elif isinstance(fem_space, VectorFemSpace): - self._shift = [xp.array([0, 0, 0], dtype=int) for _ in range(3)] + self._shift = [np.array([0, 0, 0], dtype=int) for _ in range(3)] if space_id == "H1vec": # List of list that tell us for each block entry and for each spatial direction whether we have Interpolation or Histopolation. 
IoH_for_indices = [["I", "I", "I"], ["I", "I", "I"], ["I", "I", "I"]] # Same list as before but with bools instead of chars self._IoH = [ - xp.array([False, False, False], dtype=bool), - xp.array( + np.array([False, False, False], dtype=bool), + np.array( [False, False, False], dtype=bool, ), - xp.array([False, False, False], dtype=bool), + np.array([False, False, False], dtype=bool), ] # We make a list with the interpolation/histopolation weights we need for each block and each direction. self._geo_weights = [[self._wij[0], self._wij[1], self._wij[2]] for _ in range(3)] @@ -950,12 +946,12 @@ def __init__( elif space_id == "Hcurl": IoH_for_indices = [["H", "I", "I"], ["I", "H", "I"], ["I", "I", "H"]] self._IoH = [ - xp.array([True, False, False], dtype=bool), - xp.array( + np.array([True, False, False], dtype=bool), + np.array( [False, True, False], dtype=bool, ), - xp.array([False, False, True], dtype=bool), + np.array([False, False, True], dtype=bool), ] self._geo_weights = [ [self._whij[0], self._wij[1], self._wij[2]], @@ -970,12 +966,12 @@ def __init__( elif space_id == "Hdiv": IoH_for_indices = [["I", "H", "H"], ["H", "I", "H"], ["H", "H", "I"]] self._IoH = [ - xp.array([False, True, True], dtype=bool), - xp.array( + np.array([False, True, True], dtype=bool), + np.array( [True, False, True], dtype=bool, ), - xp.array([True, True, False], dtype=bool), + np.array([True, True, False], dtype=bool), ] self._geo_weights = [ [self._wij[0], self._whij[1], self._whij[2]], @@ -1014,7 +1010,7 @@ def __init__( # meshgrid for h component self._meshgrid.append( - xp.meshgrid( + np.meshgrid( *[pt for pt in self._localpts[h]], indexing="ij", ), @@ -1332,9 +1328,9 @@ def solve_weighted(self, rhs, out=None): if isinstance(self._fem_space, TensorFemSpace): if out is None: - out = xp.zeros((self._loc_num_coeff[0], self._loc_num_coeff[1], self._loc_num_coeff[2]), dtype=float) + out = np.zeros((self._loc_num_coeff[0], self._loc_num_coeff[1], self._loc_num_coeff[2]), dtype=float) else: - assert xp.shape(out) == (self._loc_num_coeff[0], self._loc_num_coeff[1], self._loc_num_coeff[2]) + assert np.shape(out) == (self._loc_num_coeff[0], self._loc_num_coeff[1], self._loc_num_coeff[2]) solve_local_main_loop_weighted( self._solve_args, @@ -1356,7 +1352,7 @@ def solve_weighted(self, rhs, out=None): out = [] for h in range(3): out.append( - xp.zeros( + np.zeros( ( self._loc_num_coeff[h][0], self._loc_num_coeff[h][1], @@ -1369,7 +1365,7 @@ def solve_weighted(self, rhs, out=None): else: assert len(out) == 3 for h in range(3): - assert xp.shape(out[h]) == ( + assert np.shape(out[h]) == ( self._loc_num_coeff[h][0], self._loc_num_coeff[h][1], self._loc_num_coeff[h][2], @@ -1379,7 +1375,7 @@ def solve_weighted(self, rhs, out=None): # the out block for which do_nothing tell us before hand they shall be zero. for h in range(3): if self._do_nothing[h] == 1: - out[h] = xp.zeros( + out[h] = np.zeros( ( self._loc_num_coeff[h][0], self._loc_num_coeff[h][1], @@ -1429,12 +1425,12 @@ def get_dofs(self, fun, dofs=None): fh = fun(*self._meshgrid[h])[h] # Case in which fun is a list of three functions, each one with one output. else: - assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) =}." + assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) = }." # Evaluation of the function to compute the h component fh = fun[h](*self._meshgrid[h]) # Array into which we will write the Dofs. 
- f_eval_aux = xp.zeros(tuple(xp.shape(dim)[0] for dim in self._localpts[h])) + f_eval_aux = np.zeros(tuple(np.shape(dim)[0] for dim in self._localpts[h])) # For 1-forms if self._space_key == "1": @@ -1446,7 +1442,7 @@ def get_dofs(self, fun, dofs=None): f_eval.append(f_eval_aux) elif self._space_key == "3": - f_eval = xp.zeros(tuple(xp.shape(dim)[0] for dim in self._localpts)) + f_eval = np.zeros(tuple(np.shape(dim)[0] for dim in self._localpts)) # Evaluation of the function at all Gauss-Legendre quadrature points faux = fun(*self._meshgrid) get_dofs_local_3_form(self._solve_args, faux, f_eval) @@ -1465,7 +1461,7 @@ def get_dofs(self, fun, dofs=None): fun, ) == 3 - ), f"List input only for vector-valued spaces of size 3, but {len(fun) =}." + ), f"List input only for vector-valued spaces of size 3, but {len(fun) = }." for h in range(3): f_eval.append(fun[h](*self._meshgrid[h])) @@ -1481,26 +1477,26 @@ def get_dofs_weighted(self, fun, dofs=None, first_go=True, pre_computed_dofs=Non Builds 3D numpy array with the evaluation of the right-hand-side. """ if self._space_key == "0": - if first_go: + if first_go == True: pre_computed_dofs = [fun(*self._meshgrid)] elif self._space_key == "1" or self._space_key == "2": - assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) =}." + assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) = }." - self._do_nothing = xp.zeros(3, dtype=int) + self._do_nothing = np.zeros(3, dtype=int) f_eval = [] # If this is the first time this rank has to evaluate the weights degrees of freedom we declare the list where to store them. - if first_go: + if first_go == True: pre_computed_dofs = [] for h in range(3): # Evaluation of the function to compute the h component - if first_go: + if first_go == True: pre_computed_dofs.append(fun[h](*self._meshgrid[h])) # Array into which we will write the Dofs. - f_eval_aux = xp.zeros(tuple(xp.shape(dim)[0] for dim in self._localpts[h])) + f_eval_aux = np.zeros(tuple(np.shape(dim)[0] for dim in self._localpts[h])) # We check if the current set of basis functions is not one of those we have to compute in the current MPI rank. if ( @@ -1545,9 +1541,9 @@ def get_dofs_weighted(self, fun, dofs=None, first_go=True, pre_computed_dofs=Non f_eval.append(f_eval_aux) elif self._space_key == "3": - f_eval = xp.zeros(tuple(xp.shape(dim)[0] for dim in self._localpts)) + f_eval = np.zeros(tuple(np.shape(dim)[0] for dim in self._localpts)) # Evaluation of the function at all Gauss-Legendre quadrature points - if first_go: + if first_go == True: pre_computed_dofs = [fun(*self._meshgrid)] get_dofs_local_3_form_weighted( @@ -1565,9 +1561,9 @@ def get_dofs_weighted(self, fun, dofs=None, first_go=True, pre_computed_dofs=Non ) elif self._space_key == "v": - assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) =}." + assert len(fun) == 3, f"List input only for vector-valued spaces of size 3, but {len(fun) = }." - self._do_nothing = xp.zeros(3, dtype=int) + self._do_nothing = np.zeros(3, dtype=int) for h in range(3): # We check if the current set of basis functions is not one of those we have to compute in the current MPI rank. 
if ( @@ -1578,7 +1574,7 @@ def get_dofs_weighted(self, fun, dofs=None, first_go=True, pre_computed_dofs=Non # We should do nothing here self._do_nothing[h] = 1 - if first_go: + if first_go == True: f_eval = [] for h in range(3): f_eval.append(fun[h](*self._meshgrid[h])) @@ -1588,7 +1584,7 @@ def get_dofs_weighted(self, fun, dofs=None, first_go=True, pre_computed_dofs=Non "Uknown space. It must be either H1, Hcurl, Hdiv, L2 or H1vec.", ) - if first_go: + if first_go == True: if self._space_key == "0": return pre_computed_dofs[0], pre_computed_dofs elif self._space_key == "v": @@ -1645,23 +1641,23 @@ def __call__( set to false it means we computed it once already and we can reuse the dofs evaluation of the weights instead of recomputing them. - pre_computed_dofs : list of xp.arrays + pre_computed_dofs : list of np.arrays If we have already computed the evaluation of the weights at the dofs we can pass the arrays with their values here, so we do not have to compute them again. Returns ------- - coeffs : psydac.linalg.basic.vector | xp.array 3D + coeffs : psydac.linalg.basic.vector | np.array 3D The FEM spline coefficients after projection. """ - if not weighted: + if weighted == False: return self.solve(self.get_dofs(fun, dofs=dofs), out=out) else: # We set B_or_D and basis_indices as attributes of the projectors so we can easily access them in the get_rowstarts, get_rowends and get_values functions, where they are needed. self._B_or_D = B_or_D self._basis_indices = basis_indices - if first_go: + if first_go == True: # rhs contains the evaluation over the degrees of freedom of the weights multiplied by the basis function # rhs_weights contains the evaluation over the degrees of freedom of only the weights rhs, rhs_weights = self.get_dofs_weighted( @@ -1672,8 +1668,7 @@ def __call__( return self.solve_weighted(rhs, out=out), rhs_weights else: return self.solve_weighted( - self.get_dofs_weighted(fun, dofs=dofs, first_go=False, pre_computed_dofs=pre_computed_dofs), - out=out, + self.get_dofs_weighted(fun, dofs=dofs, first_go=False, pre_computed_dofs=pre_computed_dofs), out=out ) def get_translation_b(self, i, h): @@ -1863,7 +1858,7 @@ def __init__(self, space_id, mass_ops, **params): self._quad_grid_pts = self.mass_ops.derham.quad_grid_pts[self.space_key] if space_id in ("H1", "L2"): - self._quad_grid_mesh = xp.meshgrid( + self._quad_grid_mesh = np.meshgrid( *[pt.flatten() for pt in self.quad_grid_pts], indexing="ij", ) @@ -1873,12 +1868,12 @@ def __init__(self, space_id, mass_ops, **params): self._tmp = [] # tmp for matrix-vector product of geom_weights with fun for pts in self.quad_grid_pts: self._quad_grid_mesh += [ - xp.meshgrid( + np.meshgrid( *[pt.flatten() for pt in pts], indexing="ij", ), ] - self._tmp += [xp.zeros_like(self.quad_grid_mesh[-1][0])] + self._tmp += [np.zeros_like(self.quad_grid_mesh[-1][0])] # geometric weights evaluated at quadrature grid self._geom_weights = [] # loop over rows (different meshes) @@ -1889,7 +1884,7 @@ def __init__(self, space_id, mass_ops, **params): if weight is not None: self._geom_weights[-1] += [weight(*mesh)] else: - self._geom_weights[-1] += [xp.zeros_like(mesh[0])] + self._geom_weights[-1] += [np.zeros_like(mesh[0])] # other quad grid info if isinstance(self.space, TensorFemSpace): @@ -2015,7 +2010,7 @@ def get_dofs(self, fun, dofs=None, apply_bc=False, clear=True): Parameters ---------- fun : callable | list - Weight function(s) (callables or xp.ndarrays) in a 1d list of shape corresponding to number of components. 
+ Weight function(s) (callables or np.ndarrays) in a 1d list of shape corresponding to number of components. dofs : StencilVector | BlockVector, optional The vector for the output. @@ -2030,9 +2025,9 @@ def get_dofs(self, fun, dofs=None, apply_bc=False, clear=True): # evaluate fun at quad_grid or check array size if callable(fun): fun_weights = fun(*self._quad_grid_mesh) - elif isinstance(fun, xp.ndarray): + elif isinstance(fun, np.ndarray): assert fun.shape == self._quad_grid_mesh[0].shape, ( - f"Expected shape {self._quad_grid_mesh[0].shape}, got {fun.shape =} instead." + f"Expected shape {self._quad_grid_mesh[0].shape}, got {fun.shape = } instead." ) fun_weights = fun else: @@ -2041,7 +2036,7 @@ def get_dofs(self, fun, dofs=None, apply_bc=False, clear=True): fun, ) == 3 - ), f"List input only for vector-valued spaces of size 3, but {len(fun) =}." + ), f"List input only for vector-valued spaces of size 3, but {len(fun) = }." fun_weights = [] # loop over rows (different meshes) for mesh in self._quad_grid_mesh: @@ -2050,12 +2045,12 @@ def get_dofs(self, fun, dofs=None, apply_bc=False, clear=True): for f in fun: if callable(f): fun_weights[-1] += [f(*mesh)] - elif isinstance(f, xp.ndarray): - assert f.shape == mesh[0].shape, f"Expected shape {mesh[0].shape}, got {f.shape =} instead." + elif isinstance(f, np.ndarray): + assert f.shape == mesh[0].shape, f"Expected shape {mesh[0].shape}, got {f.shape = } instead." fun_weights[-1] += [f] else: raise ValueError( - f"Expected callable or numpy array, got {type(f) =} instead.", + f"Expected callable or numpy array, got {type(f) = } instead.", ) # check output vector @@ -2067,7 +2062,7 @@ def get_dofs(self, fun, dofs=None, apply_bc=False, clear=True): # compute matrix data for kernel, i.e. fun * geom_weight tot_weights = [] - if isinstance(fun_weights, xp.ndarray): + if isinstance(fun_weights, np.ndarray): tot_weights += [fun_weights * self.geom_weights] else: # loop over rows (differnt meshes) diff --git a/src/struphy/feec/psydac_derham.py b/src/struphy/feec/psydac_derham.py index e5a8cde32..3af8ec664 100644 --- a/src/struphy/feec/psydac_derham.py +++ b/src/struphy/feec/psydac_derham.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import importlib.metadata -import cunumpy as xp import psydac.core.bsplines as bsp from psydac.ddm.cart import DomainDecomposition from psydac.ddm.mpi import MockComm, MockMPI @@ -22,18 +21,16 @@ from struphy.feec.linear_operators import BoundaryOperator from struphy.feec.local_projectors_kernels import get_local_problem_size, select_quasi_points from struphy.feec.projectors import CommutingProjector, CommutingProjectorLocal -from struphy.fields_background.base import FluidEquilibrium, MHDequilibrium +from struphy.fields_background.base import MHDequilibrium from struphy.fields_background.equils import set_defaults from struphy.geometry.base import Domain from struphy.geometry.utilities import TransformedPformComponent from struphy.initial import perturbations, utilities -from struphy.initial.base import Perturbation -from struphy.initial.perturbations import Noise -from struphy.io.options import FieldsBackground, GivenInBasis, NoiseDirections from struphy.kernel_arguments.pusher_args_kernels import DerhamArguments from struphy.polar.basic import PolarDerhamSpace, PolarVector from struphy.polar.extraction_operators import PolarExtractionBlocksC1 from struphy.polar.linear_operators import PolarExtractionOperator, PolarLinearOperator +from struphy.utils.arrays import xp as np class Derham: @@ -117,7 +114,7 @@ def __init__( if 
dirichlet_bc is not None: assert len(dirichlet_bc) == 3 # make sure that boundary conditions are compatible with spline space - assert xp.all([bc == (False, False) for i, bc in enumerate(dirichlet_bc) if spl_kind[i]]) + assert np.all([bc == [False, False] for i, bc in enumerate(dirichlet_bc) if spl_kind[i]]) self._dirichlet_bc = dirichlet_bc @@ -300,7 +297,7 @@ def __init__( fag.basis, ] - self._spline_types_pyccel[sp_form][-1] = xp.array( + self._spline_types_pyccel[sp_form][-1] = np.array( self._spline_types_pyccel[sp_form][-1], ) # In this case we are working with a scalar valued space @@ -352,11 +349,11 @@ def __init__( self._quad_grid_spans[sp_form] += [fag.spans] self._quad_grid_bases[sp_form] += [fag.basis] - self._spline_types_pyccel[sp_form] = xp.array( + self._spline_types_pyccel[sp_form] = np.array( self._spline_types_pyccel[sp_form], ) else: - raise TypeError(f"{fem_space =} is not a valid type.") + raise TypeError(f"{fem_space = } is not a valid type.") # break points self._breaks = [space.breaks for space in _derham.spaces[0].spaces] @@ -364,8 +361,8 @@ def __init__( # index arrays self._indN = [ ( - xp.indices((space.ncells, space.degree + 1))[1] - + xp.arange( + np.indices((space.ncells, space.degree + 1))[1] + + np.arange( space.ncells, )[:, None] ) @@ -374,8 +371,8 @@ def __init__( ] self._indD = [ ( - xp.indices((space.ncells, space.degree + 1))[1] - + xp.arange( + np.indices((space.ncells, space.degree + 1))[1] + + np.arange( space.ncells, )[:, None] ) @@ -525,11 +522,11 @@ def __init__( # collect arguments for kernels self._args_derham = DerhamArguments( - xp.array(self.p), + np.array(self.p), self.Vh_fem["0"].knots[0], self.Vh_fem["0"].knots[1], self.Vh_fem["0"].knots[2], - xp.array(self.Vh["0"].starts), + np.array(self.Vh["0"].starts), ) @property @@ -873,11 +870,8 @@ def create_spline_function( name: str, space_id: str, coeffs: StencilVector | BlockVector = None, - backgrounds: FieldsBackground | list = None, - perturbations: Perturbation | list = None, - domain: Domain = None, - equil: FluidEquilibrium = None, - verbose: bool = True, + bckgr_params: dict = None, + pert_params: dict = None, ): """Creat a callable spline function. @@ -892,28 +886,19 @@ def create_spline_function( coeffs : StencilVector | BlockVector The spline coefficients. - backgrounds : FieldsBackground | list - For the initial condition. - - perturbations : Perturbation | list - For the initial condition. - - domain : Domain - Mapping for pullback/transform of initial condition. + bckgr_params : dict + Field's background parameters. - equil : FLuidEquilibrium - Fluid background used for inital condition. + pert_params : dict + Field's perturbation parameters for initial condition. 
""" return SplineFunction( name, space_id, self, coeffs, - backgrounds=backgrounds, - perturbations=perturbations, - domain=domain, - equil=equil, - verbose=verbose, + bckgr_params=bckgr_params, + pert_params=pert_params, ) def prepare_eval_tp_fixed(self, grids_1d): @@ -1062,7 +1047,7 @@ def _discretize_space( ) # Create uniform grid - grids = [xp.linspace(xmin, xmax, num=ne + 1) for xmin, xmax, ne in zip(min_coords, max_coords, ncells)] + grids = [np.linspace(xmin, xmax, num=ne + 1) for xmin, xmax, ne in zip(min_coords, max_coords, ncells)] # Create 1D finite element spaces and precompute quadrature data spaces_1d = [ @@ -1093,7 +1078,7 @@ def _discretize_space( elif V == "L2": Wh = Vh.reduce_degree(axes=[0, 1, 2], multiplicity=Vh.multiplicity, basis=basis) else: - raise ValueError(f"V must be one of H1, Hcurl, Hdiv or L2, but is {V =}.") + raise ValueError(f"V must be one of H1, Hcurl, Hdiv or L2, but is {V = }.") Wh.symbolic_space = V for key in Wh._refined_space: @@ -1107,7 +1092,7 @@ def _get_domain_array(self): Returns ------- - dom_arr : xp.ndarray + dom_arr : np.ndarray A 2d array of shape (#MPI processes, 9). The row index denotes the process rank. The columns are for n=0,1,2: - arr[i, 3*n + 0] holds the LEFT domain boundary of process i in direction eta_(n+1). - arr[i, 3*n + 1] holds the RIGHT domain boundary of process i in direction eta_(n+1). @@ -1121,10 +1106,10 @@ def _get_domain_array(self): nproc = 1 # send buffer - dom_arr_loc = xp.zeros(9, dtype=float) + dom_arr_loc = np.zeros(9, dtype=float) # main array (receive buffers) - dom_arr = xp.zeros(nproc * 9, dtype=float) + dom_arr = np.zeros(nproc * 9, dtype=float) # Get global starts and ends of domain decomposition gl_s = self.domain_decomposition.starts @@ -1155,7 +1140,7 @@ def _get_index_array(self, decomposition): Returns ------- - ind_arr : xp.ndarray + ind_arr : np.ndarray A 2d array of shape (#MPI processes, 6). The row index denotes the process rank. The columns are for n=0,1,2: - arr[i, 2*n + 0] holds the global start index process i in direction eta_(n+1). - arr[i, 2*n + 1] holds the global end index of process i in direction eta_(n+1). @@ -1168,10 +1153,10 @@ def _get_index_array(self, decomposition): nproc = 1 # send buffer - ind_arr_loc = xp.zeros(6, dtype=int) + ind_arr_loc = np.zeros(6, dtype=int) # main array (receive buffers) - ind_arr = xp.zeros(nproc * 6, dtype=int) + ind_arr = np.zeros(nproc * 6, dtype=int) # Get global starts and ends of cart OR domain decomposition gl_s = decomposition.starts @@ -1214,13 +1199,13 @@ def _get_neighbours(self): Returns ------- - neighbours : xp.ndarray + neighbours : np.ndarray A 3d array of shape (3,3,3). The i-th axis is the direction eta_(i+1). Neighbours along the faces have index with two 1s, neighbours along the edges only have one 1, neighbours along the edges have no 1 in the index. 
""" - neighs = xp.empty((3, 3, 3), dtype=int) + neighs = np.empty((3, 3, 3), dtype=int) for i in range(3): for j in range(3): @@ -1265,12 +1250,12 @@ def _get_neighbour_one_component(self, comp): if comp == [1, 1, 1]: return neigh_id - comp = xp.array(comp) - kinds = xp.array(kinds) + comp = np.array(comp) + kinds = np.array(kinds) # if only one process: check if comp is neighbour in non-peridic directions, if this is not the case then return the rank as neighbour id if size == 1: - if (comp[~kinds] == 1).all(): + if (comp[kinds == False] == 1).all(): return rank # multiple processes @@ -1301,15 +1286,15 @@ def _get_neighbour_one_component(self, comp): "Wrong value for component; must be 0 or 1 or 2 !", ) - neigh_inds = xp.array(neigh_inds) + neigh_inds = np.array(neigh_inds) # only use indices where information is present to find the neighbours rank - inds = xp.where(neigh_inds != None) + inds = np.where(neigh_inds != None) # find ranks (row index of domain_array) which agree in start/end indices - index_temp = xp.squeeze(self.index_array[:, inds]) - unique_ranks = xp.where( - xp.equal(index_temp, neigh_inds[inds]).all(1), + index_temp = np.squeeze(self.index_array[:, inds]) + unique_ranks = np.where( + np.equal(index_temp, neigh_inds[inds]).all(1), )[0] # if any row satisfies condition, return its index (=rank of neighbour) @@ -1329,7 +1314,7 @@ def _get_span_and_basis_for_eval_mpi(self, etas, Nspace, end): Parameters ---------- - etas : xp.array + etas : np.array 1d array of evaluation points (ascending). Nspace : SplineSpace @@ -1340,13 +1325,13 @@ def _get_span_and_basis_for_eval_mpi(self, etas, Nspace, end): Returns ------- - spans : xp.array + spans : np.array 1d array of knot span indices. - bn : xp.array + bn : np.array 2d array of pn + 1 values of N-splines indexed by (eta, spline value). - bd : xp.array + bd : np.array 2d array of pn values of D-splines indexed by (eta, spline value). """ @@ -1356,11 +1341,11 @@ def _get_span_and_basis_for_eval_mpi(self, etas, Nspace, end): Tn = Nspace.knots pn = Nspace.degree - spans = xp.zeros(etas.size, dtype=int) - bns = xp.zeros((etas.size, pn + 1), dtype=float) - bds = xp.zeros((etas.size, pn), dtype=float) - bn = xp.zeros(pn + 1, dtype=float) - bd = xp.zeros(pn, dtype=float) + spans = np.zeros(etas.size, dtype=int) + bns = np.zeros((etas.size, pn + 1), dtype=float) + bds = np.zeros((etas.size, pn), dtype=float) + bn = np.zeros(pn + 1, dtype=float) + bd = np.zeros(pn, dtype=float) for n in range(etas.size): # avoid 1. --> 0. for clamped interpolation @@ -1406,17 +1391,11 @@ class SplineFunction: coeffs : StencilVector | BlockVector The spline coefficients (optional). - backgrounds : FieldsBackground | list - For the initial condition. - - perturbations : Perturbation | list - For the initial condition. - - domain : Domain - Mapping for pullback/transform of initial condition. + bckgr_params : dict + Field's background parameters. - equil : FluidEquilibrium - Fluid background used for inital condition. + pert_params : dict + Field's perturbation parameters for initial condition. 
""" def __init__( @@ -1425,19 +1404,14 @@ def __init__( space_id: str, derham: Derham, coeffs: StencilVector | BlockVector = None, - backgrounds: FieldsBackground | list = None, - perturbations: Perturbation | list = None, - domain: Domain = None, - equil: FluidEquilibrium = None, - verbose: bool = True, + bckgr_params: dict = None, + pert_params: dict = None, ): self._name = name self._space_id = space_id self._derham = derham - self._backgrounds = backgrounds - self._perturbations = perturbations - self._domain = domain - self._equil = equil + self._bckgr_params = bckgr_params + self._pert_params = pert_params # initialize field in memory (FEM space, vector and tensor product (stencil) vector) self._space_key = derham.space_to_form[space_id] @@ -1477,12 +1451,6 @@ def __init__( else: self._nbasis = [tuple([space.nbasis for space in vec_space.spaces]) for vec_space in self.fem_space.spaces] - if verbose and MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nAllocated SplineFuntion '{self.name}' in space '{self.space_id}'.") - - if self.backgrounds is not None or self.perturbations is not None: - self.initialize_coeffs(domain=self.domain, equil=self.equil) - @property def name(self): """Name of the field in data container (string).""" @@ -1503,16 +1471,6 @@ def derham(self): """3d Derham complex struphy.feec.psydac_derham.Derham.""" return self._derham - @property - def domain(self): - """Mapping for pullback/transform of initial condition.""" - return self._domain - - @property - def equil(self): - """Fluid equilibirum used for initial condition.""" - return self._equil - @property def space(self): """Coefficient space (VectorSpace) of the field.""" @@ -1538,7 +1496,7 @@ def vector(self, value): """In-place setter for Stencil-/Block-/PolarVector.""" if isinstance(self._vector, StencilVector): - assert isinstance(value, (StencilVector, xp.ndarray)) + assert isinstance(value, (StencilVector, np.ndarray)) s1, s2, s3 = self.starts e1, e2, e3 = self.ends @@ -1561,10 +1519,10 @@ def vector(self, value): self._vector.set_vector(value) else: if isinstance(self._vector.tp, StencilVector): - assert isinstance(value[0], xp.ndarray) + assert isinstance(value[0], np.ndarray) assert isinstance( value[1], - (StencilVector, xp.ndarray), + (StencilVector, np.ndarray), ) self._vector.pol[0][:] = value[0][:] @@ -1573,16 +1531,14 @@ def vector(self, value): e1, e2, e3 = self.ends self._vector.tp[s1 : e1 + 1, s2 : e2 + 1, s3 : e3 + 1] = value[1][ - s1 : e1 + 1, - s2 : e2 + 1, - s3 : e3 + 1, + s1 : e1 + 1, s2 : e2 + 1, s3 : e3 + 1 ] else: for n in range(3): - assert isinstance(value[n][0], xp.ndarray) + assert isinstance(value[n][0], np.ndarray) assert isinstance( value[n][1], - (StencilVector, xp.ndarray), + (StencilVector, np.ndarray), ) self._vector.pol[n][:] = value[n][0][:] @@ -1591,9 +1547,7 @@ def vector(self, value): e1, e2, e3 = self.ends[n] self._vector.tp[n][s1 : e1 + 1, s2 : e2 + 1, s3 : e3 + 1] = value[n][1][ - s1 : e1 + 1, - s2 : e2 + 1, - s3 : e3 + 1, + s1 : e1 + 1, s2 : e2 + 1, s3 : e3 + 1 ] self._vector.update_ghost_regions() @@ -1629,14 +1583,14 @@ def vector_stencil(self): return self._vector_stencil @property - def backgrounds(self) -> FieldsBackground | list: - """For the initial condition.""" - return self._backgrounds + def bckgr_params(self): + """Field's background parameters.""" + return self._bckgr_params @property - def perturbations(self) -> Perturbation | list: - """For the initial condition.""" - return self._perturbations + def pert_params(self): + """Field's perturbation parameters 
for initial condition.""" + return self._pert_params ############### ### Methods ### @@ -1658,180 +1612,173 @@ def extract_coeffs(self, update_ghost_regions=True): def initialize_coeffs( self, *, - backgrounds: FieldsBackground | list = None, - perturbations: Perturbation | list = None, - domain: Domain = None, - equil: FluidEquilibrium = None, + bckgr_params=None, + pert_params=None, + domain=None, + bckgr_obj=None, + species=None, ): """ - Set the initial conditions for self.vector. - """ + Sets the initial conditions for self.vector. - # set background paramters - if backgrounds is not None: - # if self.backgrounds is not None: - # print(f"Attention: overwriting backgrounds for {self.name}") - self._backgrounds = backgrounds + Parameters + ---------- + bckgr_params : dict + Field's background parameters. - # set perturbation paramters - if perturbations is not None: - # if self.perturbations is not None: - # print(f"Attention: overwriting perturbation parameters for {self.name}") - self._perturbations = perturbations + pert_params : dict + Field's perturbation parameters for initial condition. + + domain : struphy.geometry.domains + Domain object for metric coefficients; only needed for transforming analytical perturbations. + + bckgr_obj : FluidEquilibrium + The fields background object. - # set domain - if domain is not None: - # if self.domain is not None: - # print(f"Attention: overwriting domain for {self.name}") - self._domain = domain + species : string + Species name (e.g. "mhd") the field belongs to. + """ - if isinstance(self.backgrounds, FieldsBackground): - self._backgrounds = [self.backgrounds] + # set background parameters + if bckgr_params is not None: + if self._bckgr_params is not None: + print(f"Attention: overwriting background parameters for {self.name}") + self._bckgr_params = bckgr_params - if isinstance(self.perturbations, Perturbation): - self._perturbations = [self.perturbations] + # set perturbation parameters + if pert_params is not None: + if self._pert_params is not None: + print(f"Attention: overwriting perturbation parameters for {self.name}") + self._pert_params = pert_params - # start from zero coeffs self._vector *= 0.0 if MPI.COMM_WORLD.Get_rank() == 0: print(f"Initializing {self.name} ...") - # add backgrounds to initial vector - if self.backgrounds is not None: - for fb in self.backgrounds: - assert isinstance(fb, FieldsBackground) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"Adding background {fb} ...") + # add background to initial vector + if self.bckgr_params is not None: + for _type in self.bckgr_params: + _params = self.bckgr_params[_type].copy() # special case of const - if fb.type == "LogicalConst": - vals = fb.values - assert isinstance(vals, (list, tuple)) + if "LogicalConst" in _type: + _val = _params["values"] if self.space_id in {"H1", "L2"}: + assert isinstance(_val, (float, int)) def f_tmp(e1, e2, e3): - return vals[0] + 0.0 * e1 + return _val + 0.0 * e1 fun = f_tmp else: - assert len(vals) == 3 + assert isinstance(_val, list) + assert len(_val) == 3 fun = [] + for i, _v in enumerate(_val): + assert isinstance(_v, (float, int)) or _v is None - if vals[0] is not None: - fun += [lambda e1, e2, e3: vals[0] + 0.0 * e1] + if _val[0] is not None: + fun += [lambda e1, e2, e3: _val[0] + 0.0 * e1] else: - fun += [lambda e1, e2, e3: 0.0 * e1] - if vals[1] is not None: - fun += [lambda e1, e2, e3: vals[1] + 0.0 * e1] + if _val[1] is not None: + fun += [lambda e1, e2, e3: _val[1] + 0.0 * e1] else: + fun += [lambda e1, e2,
e3: 0.0 * e1] - if vals[2] is not None: - fun += [lambda e1, e2, e3: vals[2] + 0.0 * e1] + if _val[2] is not None: + fun += [lambda e1, e2, e3: _val[2] + 0.0 * e1] else: - fun += [lambda e1, e2, e3: 0.0 * e1] + fun += [lambda e1, e2, e3: 0.0 * e1] else: - assert equil is not None - var = fb.variable - assert var in dir(MHDequilibrium), f"{var =} is not an attribute of any fields background." + assert bckgr_obj is not None + _var = _params["variable"] + assert _var in dir(MHDequilibrium), f"{_var = } is not an attribute of any fields background." if self.space_id in {"H1", "L2"}: - fun = getattr(equil, var) + fun = getattr(bckgr_obj, _var) else: - assert (var + "_1") in dir(MHDequilibrium), ( - f"{(var + '_1') =} is not an attribute of any fields background." + assert (_var + "_1") in dir(MHDequilibrium), ( + f"{(_var + '_1') = } is not an attribute of any fields background." ) fun = [ - getattr(equil, var + "_1"), - getattr(equil, var + "_2"), - getattr(equil, var + "_3"), + getattr(bckgr_obj, _var + "_1"), + getattr(bckgr_obj, _var + "_2"), + getattr(bckgr_obj, _var + "_3"), ] # perform projection self.vector += self.derham.P[self.space_key](fun) # add perturbations to coefficient vector - if self.perturbations is not None: - for ptb in self.perturbations: + if self.pert_params is not None: + for _type in self.pert_params: if MPI.COMM_WORLD.Get_rank() == 0: - print(f"Adding perturbation {ptb} ...") + print(f"Adding perturbation {_type} ...") + + _params = self.pert_params[_type].copy() # special case of white noise in logical space for different components - if isinstance(ptb, Noise): + if "noise" in _type: + # component(s) to perturb + if isinstance(_params["comps"], bool): + comps = [_params["comps"]] + else: + comps = _params["comps"] + _params.pop("comps") + # set white noise FE coefficients - self._add_noise( - direction=ptb.direction, - amp=ptb.amp, - seed=ptb.seed, - n=ptb.comp, - ) - # perturbation class - elif isinstance(ptb, Perturbation): if self.space_id in {"H1", "L2"}: - fun = TransformedPformComponent( - ptb, - ptb.given_in_basis, - self.space_key, - domain=domain, - ) + if comps[0]: + self._add_noise(**_params) elif self.space_id in {"Hcurl", "Hdiv", "H1vec"}: - fun_vec = [None] * 3 - fun_vec[ptb.comp] = ptb + for n, comp in enumerate(comps): + if comp: + self._add_noise(**_params, n=n) - # pullback callable for each component - fun = [] - for comp in range(3): - fun += [ - TransformedPformComponent( - fun_vec, - ptb.given_in_basis, - self.space_key, - comp=comp, - domain=domain, - ), - ] + # given function class + elif _type in dir(perturbations): + fun = transform_perturbation(_type, _params, self.space_key, domain) # perform projection self.vector += self.derham.P[self.space_key](fun) - # TODO: re-add Eigfun and InitFromOutput in new framework - # loading of MHD eigenfunction (legacy code, might not be up to date) - # elif "EigFun" in _type: - # print("Warning: Eigfun is not regularly tested ...") - # from struphy.initial import eigenfunctions - - # # select class - # funs = getattr(eigenfunctions, _type)( - # self.derham, - # **_params, - # ) - - # # select eigenvector and set coefficients - # if hasattr(funs, self.name): - # eig_vec = getattr(funs, self.name) - - # self.vector += eig_vec - - # # initialize from existing output file - # elif "InitFromOutput" in _type: - # # select class - # o_data = getattr(utilities, _type)( - # self.derham, - # self.name, - # species, - # **_params, - # ) - - # if isinstance(self.vector, StencilVector): - # self.vector._data[:] += o_data.vector - - # 
else: - # for n in range(3): - # self.vector[n]._data[:] += o_data.vector[n] + elif "EigFun" in _type: + print("Warning: Eigfun is not regularly tested ...") + from struphy.initial import eigenfunctions + + # select class + funs = getattr(eigenfunctions, _type)( + self.derham, + **_params, + ) + + # select eigenvector and set coefficients + if hasattr(funs, self.name): + eig_vec = getattr(funs, self.name) + + self.vector += eig_vec + + # initialize from existing output file + elif "InitFromOutput" in _type: + # select class + o_data = getattr(utilities, _type)( + self.derham, + self.name, + species, + **_params, + ) + + if isinstance(self.vector, StencilVector): + self.vector._data[:] += o_data.vector + + else: + for n in range(3): + self.vector[n]._data[:] += o_data.vector[n] # apply boundary operator (in-place) self.derham.boundary_ops[self.space_key].dot( @@ -1888,7 +1835,7 @@ def eval_tp_fixed_loc(self, spans, bases, out=None): assert [span.size for span in spans] == [base.shape[0] for base in bases] if out is None: - out = xp.empty([span.size for span in spans], dtype=float) + out = np.empty([span.size for span in spans], dtype=float) else: assert out.shape == tuple([span.size for span in spans]) @@ -1897,8 +1844,8 @@ def eval_tp_fixed_loc(self, spans, bases, out=None): *bases, vec._data, self.derham.spline_types_pyccel[self.space_key], - xp.array(self.derham.p), - xp.array(self.starts), + np.array(self.derham.p), + np.array(self.starts), out, ) @@ -1912,7 +1859,7 @@ def eval_tp_fixed_loc(self, spans, bases, out=None): assert [span.size for span in spans] == [base.shape[0] for base in bases[i]] if out_is_none: - out += xp.empty( + out += np.empty( [span.size for span in spans], dtype=float, ) @@ -1926,10 +1873,10 @@ def eval_tp_fixed_loc(self, spans, bases, out=None): *bases[i], vec[i]._data, self.derham.spline_types_pyccel[self.space_key][i], - xp.array( + np.array( self.derham.p, ), - xp.array( + np.array( self.starts[i], ), out[i], @@ -1996,14 +1943,14 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): # prepare arrays for AllReduce if tmp is None: - tmp = xp.zeros( + tmp = np.zeros( tmp_shape, dtype=float, ) else: - assert isinstance(tmp, xp.ndarray) + assert isinstance(tmp, np.ndarray) assert tmp.shape == tmp_shape - assert tmp.dtype.type is xp.float64 + assert tmp.dtype.type is np.float64 tmp[:] = 0.0 # scalar-valued field @@ -2018,11 +1965,11 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): E3, self._vector_stencil._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts), + np.array(self.starts), tmp, ) elif marker_evaluation: @@ -2031,11 +1978,11 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): markers, self._vector_stencil._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts), + np.array(self.starts), tmp, ) else: @@ -2046,16 +1993,16 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): E3, self._vector_stencil._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts), + np.array(self.starts), tmp, ) if self.derham.comm is not None: - if not local: + if local == False: self.derham.comm.Allreduce( MPI.IN_PLACE, tmp, @@ -2070,7 +2017,7 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): out += tmp if squeeze_out: - out = xp.squeeze(out) + out = np.squeeze(out) if out.ndim == 0: out = 
out.item() @@ -2089,11 +2036,11 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): E3, self._vector_stencil[n]._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts[n]), + np.array(self.starts[n]), tmp, ) elif marker_evaluation: @@ -2102,11 +2049,11 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): markers, self._vector_stencil[n]._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts[n]), + np.array(self.starts[n]), tmp, ) else: @@ -2117,16 +2064,16 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): E3, self._vector_stencil[n]._data, kind, - xp.array(self.derham.p), + np.array(self.derham.p), T1, T2, T3, - xp.array(self.starts[n]), + np.array(self.starts[n]), tmp, ) if self.derham.comm is not None: - if not local: + if local == False: self.derham.comm.Allreduce( MPI.IN_PLACE, tmp, @@ -2143,7 +2090,7 @@ def __call__(self, *etas, out=None, tmp=None, squeeze_out=False, local=False): tmp[:] = 0.0 if squeeze_out: - out[-1] = xp.squeeze(out[-1]) + out[-1] = np.squeeze(out[-1]) if out[-1].ndim == 0: out[-1] = out[-1].item() @@ -2176,11 +2123,11 @@ def _flag_pts_not_on_proc(self, *etas): markers = etas[0] # check which particles are on the current process domain - is_on_proc_domain = xp.logical_and( + is_on_proc_domain = np.logical_and( markers[:, :3] >= dom_arr[rank, 0::3], markers[:, :3] <= dom_arr[rank, 1::3], ) - on_proc = xp.all(is_on_proc_domain, axis=1) + on_proc = np.all(is_on_proc_domain, axis=1) markers[~on_proc, :] = -1.0 @@ -2206,15 +2153,15 @@ def _flag_pts_not_on_proc(self, *etas): E3[E3 == dom_arr[rank, 7]] += 1e-8 # True for eval points on current process - E1_on_proc = xp.logical_and( + E1_on_proc = np.logical_and( E1 >= dom_arr[rank, 0], E1 <= dom_arr[rank, 1], ) - E2_on_proc = xp.logical_and( + E2_on_proc = np.logical_and( E2 >= dom_arr[rank, 3], E2 <= dom_arr[rank, 4], ) - E3_on_proc = xp.logical_and( + E3_on_proc = np.logical_and( E3 >= dom_arr[rank, 6], E3 <= dom_arr[rank, 7], ) @@ -2224,13 +2171,7 @@ def _flag_pts_not_on_proc(self, *etas): E2[~E2_on_proc] = -1.0 E3[~E3_on_proc] = -1.0 - def _add_noise( - self, - direction: NoiseDirections = "e3", - amp: float = 0.0001, - seed: int = None, - n: int = None, - ): + def _add_noise(self, direction="e3", amp=0.0001, seed=None, n=None): """Add noise to a vector component where init_comps==True, otherwise leave at zero. 
Parameters @@ -2375,7 +2316,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): Returns ------- - _amps : xp.array + _amps : np.array The noisy FE coefficients in the desired direction (1d, 2d or 3d array).""" if self.derham.comm is not None: @@ -2390,40 +2331,40 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): domain_array = self.derham.domain_array if seed is not None: - xp.random.seed(seed) + np.random.seed(seed) # temporary - _amps = xp.zeros(shapes) + _amps = np.zeros(shapes) # no process has been drawn for yet - already_drawn = xp.zeros(nprocs) == 1.0 + already_drawn = np.zeros(nprocs) == 1.0 # 1d mid point arrays in each direction mid_points = [] for npr in nprocs: delta = 1.0 / npr - mid_points_i = xp.zeros(npr) + mid_points_i = np.zeros(npr) for n in range(npr): mid_points_i[n] = delta * (n + 1 / 2) mid_points += [mid_points_i] if direction == "e1": - tmp_arrays = xp.zeros(nprocs[0]).tolist() + tmp_arrays = np.zeros(nprocs[0]).tolist() elif direction == "e2": - tmp_arrays = xp.zeros(nprocs[1]).tolist() + tmp_arrays = np.zeros(nprocs[1]).tolist() elif direction == "e3": - tmp_arrays = xp.zeros(nprocs[2]).tolist() + tmp_arrays = np.zeros(nprocs[2]).tolist() elif direction == "e1e2": - tmp_arrays = xp.zeros((nprocs[0], nprocs[1])).tolist() + tmp_arrays = np.zeros((nprocs[0], nprocs[1])).tolist() Warning, f"2d noise in the directions {direction} is not correctly initialized for MPI !!" elif direction == "e1e3": - tmp_arrays = xp.zeros((nprocs[0], nprocs[2])).tolist() + tmp_arrays = np.zeros((nprocs[0], nprocs[2])).tolist() Warning, f"2d noise in the directions {direction} is not correctly initialized for MPI !!" elif direction == "e2e3": - tmp_arrays = xp.zeros((nprocs[1], nprocs[2])).tolist() + tmp_arrays = np.zeros((nprocs[1], nprocs[2])).tolist() Warning, f"2d noise in the directions {direction} is not correctly initialized for MPI !!" elif direction == "e1e2e3": - tmp_arrays = xp.zeros((nprocs[0], nprocs[1], nprocs[2])).tolist() + tmp_arrays = np.zeros((nprocs[0], nprocs[1], nprocs[2])).tolist() Warning, f"3d noise in the directions {direction} is not correctly initialized for MPI !!" 
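# NOTE: the bare `Warning, f"..."` expressions above only build a (class, str) tuple and
# discard it, so no warning is ever emitted at runtime. A minimal sketch of an actual
# runtime warning via the standard library (not part of this patch; the message text is
# taken from the strings above):
#
#     import warnings
#
#     if direction in ("e1e2", "e1e3", "e2e3", "e1e2e3"):
#         warnings.warn(
#             f"{len(direction) // 2}d noise in the directions {direction} "
#             "is not correctly initialized for MPI !!",
#             stacklevel=2,
#         )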
else: raise ValueError("Invalid direction for tmp_arrays.") @@ -2432,7 +2373,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): inds_current = [] for n in range(3): mid_pt_current = (domain_array[rank, 3 * n] + domain_array[rank, 3 * n + 1]) / 2.0 - inds_current += [xp.argmin(xp.abs(mid_points[n] - mid_pt_current))] + inds_current += [np.argmin(np.abs(mid_points[n] - mid_pt_current))] # loop over processes for i in range(comm_size): @@ -2440,7 +2381,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): inds = [] for n in range(3): mid_pt = (domain_array[i, 3 * n] + domain_array[i, 3 * n + 1]) / 2.0 - inds += [xp.argmin(xp.abs(mid_points[n] - mid_pt))] + inds += [np.argmin(np.abs(mid_points[n] - mid_pt))] if already_drawn[inds[0], inds[1], inds[2]]: if direction == "e1": @@ -2462,7 +2403,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): if direction == "e1": tmp_arrays[inds[0]] = ( ( - xp.random.rand( + np.random.rand( *shapes, ) - 0.5 @@ -2475,7 +2416,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): elif direction == "e2": tmp_arrays[inds[1]] = ( ( - xp.random.rand( + np.random.rand( *shapes, ) - 0.5 @@ -2488,7 +2429,7 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): elif direction == "e3": tmp_arrays[inds[2]] = ( ( - xp.random.rand( + np.random.rand( *shapes, ) - 0.5 @@ -2499,23 +2440,23 @@ def _tmp_noise_for_mpi(self, *shapes, direction="e3", amp=0.0001, seed=None): already_drawn[:, :, inds[2]] = True _amps[:] = tmp_arrays[inds[2]] elif direction == "e1e2": - tmp_arrays[inds[0]][inds[1]] = (xp.random.rand(*shapes) - 0.5) * 2.0 * amp + tmp_arrays[inds[0]][inds[1]] = (np.random.rand(*shapes) - 0.5) * 2.0 * amp already_drawn[inds[0], inds[1], :] = True _amps[:] = tmp_arrays[inds[0]][inds[1]] elif direction == "e1e3": - tmp_arrays[inds[0]][inds[2]] = (xp.random.rand(*shapes) - 0.5) * 2.0 * amp + tmp_arrays[inds[0]][inds[2]] = (np.random.rand(*shapes) - 0.5) * 2.0 * amp already_drawn[inds[0], :, inds[2]] = True _amps[:] = tmp_arrays[inds[0]][inds[2]] elif direction == "e2e3": - tmp_arrays[inds[1]][inds[2]] = (xp.random.rand(*shapes) - 0.5) * 2.0 * amp + tmp_arrays[inds[1]][inds[2]] = (np.random.rand(*shapes) - 0.5) * 2.0 * amp already_drawn[:, inds[1], inds[2]] = True _amps[:] = tmp_arrays[inds[1]][inds[2]] elif direction == "e1e2e3": - tmp_arrays[inds[0]][inds[1]][inds[2]] = (xp.random.rand(*shapes) - 0.5) * 2.0 * amp + tmp_arrays[inds[0]][inds[1]][inds[2]] = (np.random.rand(*shapes) - 0.5) * 2.0 * amp already_drawn[inds[0], inds[1], inds[2]] = True _amps[:] = tmp_arrays[inds[0]][inds[1]][inds[2]] - if xp.all(xp.array([ind_c == ind for ind_c, ind in zip(inds_current, inds)])): + if np.all(np.array([ind_c == ind for ind_c, ind in zip(inds_current, inds)])): return _amps @@ -2766,16 +2707,16 @@ def get_pts_and_wts(space_1d, start, end, n_quad=None, polar_shift=False): histopol_loc = space_1d.histopolation_grid[start : end + 2].copy() # make sure that greville points used for interpolation are in [0, 1] - assert xp.all(xp.logical_and(greville_loc >= 0.0, greville_loc <= 1.0)) + assert np.all(np.logical_and(greville_loc >= 0.0, greville_loc <= 1.0)) # interpolation if space_1d.basis == "B": x_grid = greville_loc pts = greville_loc[:, None] - wts = xp.ones(pts.shape, dtype=float) + wts = np.ones(pts.shape, dtype=float) # sub-interval index is always 0 for interpolation. 
- subs = xp.zeros(pts.shape[0], dtype=int) + subs = np.zeros(pts.shape[0], dtype=int) # !! shift away first interpolation point in eta_1 direction for polar domains !! if pts[0] == 0.0 and polar_shift: @@ -2789,27 +2730,27 @@ def get_pts_and_wts(space_1d, start, end, n_quad=None, polar_shift=False): union_breaks = space_1d.breaks[:-1] # Make union of Greville and break points - tmp = set(xp.round(space_1d.histopolation_grid, decimals=14)).union( - xp.round(union_breaks, decimals=14), + tmp = set(np.round(space_1d.histopolation_grid, decimals=14)).union( + np.round(union_breaks, decimals=14), ) tmp = list(tmp) tmp.sort() - tmp_a = xp.array(tmp) + tmp_a = np.array(tmp) x_grid = tmp_a[ - xp.logical_and( + np.logical_and( tmp_a - >= xp.min( + >= np.min( histopol_loc, ) - 1e-14, - tmp_a <= xp.max(histopol_loc) + 1e-14, + tmp_a <= np.max(histopol_loc) + 1e-14, ) ] # determine subinterval index (= 0 or 1): - subs = xp.zeros(x_grid[:-1].size, dtype=int) + subs = np.zeros(x_grid[:-1].size, dtype=int) for n, x_h in enumerate(x_grid[:-1]): add = 1 for x_g in histopol_loc: @@ -2822,7 +2763,7 @@ def get_pts_and_wts(space_1d, start, end, n_quad=None, polar_shift=False): # products of basis functions are integrated exactly n_quad = space_1d.degree + 1 - pts_loc, wts_loc = xp.polynomial.legendre.leggauss(n_quad) + pts_loc, wts_loc = np.polynomial.legendre.leggauss(n_quad) x, wts = bsp.quadrature_grid(x_grid, pts_loc, wts_loc) @@ -2885,12 +2826,12 @@ def get_pts_and_wts_quasi( # interpolation if space_1d.basis == "B": if p == 1 and h != 1.0: - x_grid = xp.linspace(-(p - 1) * h, 1.0 - h + (h / 2.0), (N + p - 1) * 2) + x_grid = np.linspace(-(p - 1) * h, 1.0 - h + (h / 2.0), (N + p - 1) * 2) else: - x_grid = xp.linspace(-(p - 1) * h, 1.0 - h, (N + p - 1) * 2 - 1) + x_grid = np.linspace(-(p - 1) * h, 1.0 - h, (N + p - 1) * 2 - 1) pts = x_grid[:, None] % 1.0 - wts = xp.ones(pts.shape, dtype=float) + wts = np.ones(pts.shape, dtype=float) # !! shift away first interpolation point in eta_1 direction for polar domains !! if pts[0] == 0.0 and polar_shift: @@ -2901,16 +2842,16 @@ def get_pts_and_wts_quasi( # The computation of histopolation points breaks in case we have Nel=1 and periodic boundary conditions since we end up with only one x_grid point. # We need to build the histopolation points by hand in this scenario. 
if p == 0 and h == 1.0: - x_grid = xp.array([0.0, 0.5, 1.0]) + x_grid = np.array([0.0, 0.5, 1.0]) elif p == 0 and h != 1.0: - x_grid = xp.linspace(-p * h, 1.0 - h + (h / 2.0), (N + p) * 2) + x_grid = np.linspace(-p * h, 1.0 - h + (h / 2.0), (N + p) * 2) else: - x_grid = xp.linspace(-p * h, 1.0 - h, (N + p) * 2 - 1) + x_grid = np.linspace(-p * h, 1.0 - h, (N + p) * 2 - 1) n_quad = p + 1 # Gauss - Legendre quadrature points and weights # products of basis functions are integrated exactly - pts_loc, wts_loc = xp.polynomial.legendre.leggauss(n_quad) + pts_loc, wts_loc = np.polynomial.legendre.leggauss(n_quad) x, wts = bsp.quadrature_grid(x_grid, pts_loc, wts_loc) pts = x % 1.0 @@ -2924,26 +2865,26 @@ def get_pts_and_wts_quasi( N_b = N + p # Filling the quasi-interpolation points for i=0 and i=1 (since they are equal) - x_grid = xp.linspace(0.0, knots[p + 1], p + 1) - x_aux = xp.linspace(0.0, knots[p + 1], p + 1) - x_grid = xp.append(x_grid, x_aux) + x_grid = np.linspace(0.0, knots[p + 1], p + 1) + x_aux = np.linspace(0.0, knots[p + 1], p + 1) + x_grid = np.append(x_grid, x_aux) # Now we append those for 1 V2):") res_PSY = OPS_PSY.Q1.dot(x1_st) - res_STR = OPS_STR.Q1_dot(xp.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) + res_STR = OPS_STR.Q1_dot(np.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_2(res_STR) MPI_COMM.Barrier() @@ -284,7 +284,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): Q1T = OPS_PSY.Q1.transpose() res_PSY = Q1T.dot(x2_st) - res_STR = OPS_STR.transpose_Q1_dot(xp.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) + res_STR = OPS_STR.transpose_Q1_dot(np.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_1(res_STR) MPI_COMM.Barrier() @@ -310,7 +310,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): print("\nW1 (V1 --> V1, Identity operator in this case):") res_PSY = OPS_PSY.W1.dot(x1_st) - res_STR = OPS_STR.W1_dot(xp.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) + res_STR = OPS_STR.W1_dot(np.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_1(res_STR) MPI_COMM.barrier() @@ -333,7 +333,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): W1T = OPS_PSY.W1.transpose() res_PSY = W1T.dot(x1_st) - res_STR = OPS_STR.transpose_W1_dot(xp.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) + res_STR = OPS_STR.transpose_W1_dot(np.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_1(res_STR) MPI_COMM.barrier() @@ -359,7 +359,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): print("\nQ2 (V2 --> V2, Identity operator in this case):") res_PSY = OPS_PSY.Q2.dot(x2_st) - res_STR = OPS_STR.Q2_dot(xp.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) + res_STR = OPS_STR.Q2_dot(np.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_2(res_STR) MPI_COMM.Barrier() @@ -382,7 +382,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): Q2T = OPS_PSY.Q2.transpose() res_PSY = Q2T.dot(x2_st) - res_STR = OPS_STR.transpose_Q2_dot(xp.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) + res_STR = OPS_STR.transpose_Q2_dot(np.concatenate((x2[0].flatten(), x2[1].flatten(), x2[2].flatten()))) res_STR_0, res_STR_1, res_STR_2 = SPACES.extract_2(res_STR) MPI_COMM.Barrier() @@ 
-408,7 +408,7 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): print("\nX1 (V1 --> V0 x V0 x V0):") res_PSY = OPS_PSY.X1.dot(x1_st) - res_STR = OPS_STR.X1_dot(xp.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) + res_STR = OPS_STR.X1_dot(np.concatenate((x1[0].flatten(), x1[1].flatten(), x1[2].flatten()))) res_STR_0 = SPACES.extract_0(res_STR[0]) res_STR_1 = SPACES.extract_0(res_STR[1]) res_STR_2 = SPACES.extract_0(res_STR[2]) @@ -460,11 +460,10 @@ def test_some_basis_ops(Nel, p, spl_kind, mapping): @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) @pytest.mark.parametrize( "dirichlet_bc", - [None, [(False, True), (False, False), (False, True)], [(False, False), (False, False), (True, False)]], + [None, [[False, True], [False, False], [False, True]], [[False, False], [False, False], [True, False]]], ) @pytest.mark.parametrize("mapping", [["IGAPolarCylinder", {"a": 1.0, "Lz": 3.0}]]) def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.mhd_operators import MHDOperators @@ -475,6 +474,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal from struphy.fields_background.equils import ScrewPinch from struphy.geometry import domains from struphy.polar.basic import PolarVector + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -503,7 +503,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -515,11 +515,9 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal if dirichlet_bc is not None: for i, knd in enumerate(spl_kind): if knd: - dirichlet_bc[i] = (False, False) + dirichlet_bc[i] = [False, False] else: - dirichlet_bc = [(False, False)] * 3 - - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[False, False]] * 3 # derham object nq_el = [p[0] + 1, p[1] + 1, p[2] + 1] @@ -584,11 +582,11 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal x2_pol_psy.tp = x2_psy x3_pol_psy.tp = x3_psy - xp.random.seed(1607) - x0_pol_psy.pol = [xp.random.rand(x0_pol_psy.pol[0].shape[0], x0_pol_psy.pol[0].shape[1])] - x1_pol_psy.pol = [xp.random.rand(x1_pol_psy.pol[n].shape[0], x1_pol_psy.pol[n].shape[1]) for n in range(3)] - x2_pol_psy.pol = [xp.random.rand(x2_pol_psy.pol[n].shape[0], x2_pol_psy.pol[n].shape[1]) for n in range(3)] - x3_pol_psy.pol = [xp.random.rand(x3_pol_psy.pol[0].shape[0], x3_pol_psy.pol[0].shape[1])] + np.random.seed(1607) + x0_pol_psy.pol = [np.random.rand(x0_pol_psy.pol[0].shape[0], x0_pol_psy.pol[0].shape[1])] + x1_pol_psy.pol = [np.random.rand(x1_pol_psy.pol[n].shape[0], x1_pol_psy.pol[n].shape[1]) for n in range(3)] + x2_pol_psy.pol = [np.random.rand(x2_pol_psy.pol[n].shape[0], x2_pol_psy.pol[n].shape[1]) for n in range(3)] + x3_pol_psy.pol = [np.random.rand(x3_pol_psy.pol[0].shape[0], x3_pol_psy.pol[0].shape[1])] # apply boundary conditions to legacy vectors for right shape x0_pol_str = space.B0.dot(x0_pol_psy.toarray(True)) @@ -614,7 +612,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.PR(x3_pol_str) print(f"Rank {mpi_rank} | Asserting MHD operator K3.") - xp.allclose(space.B3.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B3.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") 
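# NOTE: np.allclose(...) is called here without an assert, as in the K3.T, Q2, T2 and S2
# checks below, so "Assertion passed" is printed even when the comparison fails. A sketch
# of the presumably intended pattern (the tolerance is a hypothetical choice):
#
#     assert np.allclose(space.B3.T.dot(r_str), r_psy.toarray(True), atol=1e-12)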
mpi_comm.Barrier() @@ -627,7 +625,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.PR.T(x3_pol_str) print(f"Rank {mpi_rank} | Asserting transpose MHD operator K3.T.") - xp.allclose(space.B3.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B3.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") # ===== operator Q2 (V2 --> V2) ============ @@ -644,7 +642,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.MF(x2_pol_str) print(f"Rank {mpi_rank} | Asserting MHD operator Q2.") - xp.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") mpi_comm.Barrier() @@ -657,7 +655,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.MF.T(x2_pol_str) print(f"Rank {mpi_rank} | Asserting transposed MHD operator Q2.T.") - xp.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") # ===== operator T2 (V2 --> V1) ============ @@ -674,7 +672,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.EF(x2_pol_str) print(f"Rank {mpi_rank} | Asserting MHD operator T2.") - xp.allclose(space.B1.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B1.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") mpi_comm.Barrier() @@ -687,7 +685,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.EF.T(x1_pol_str) print(f"Rank {mpi_rank} | Asserting transposed MHD operator T2.T.") - xp.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") # ===== operator S2 (V2 --> V2) ============ @@ -704,7 +702,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.PF(x2_pol_str) print(f"Rank {mpi_rank} | Asserting MHD operator S2.") - xp.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") mpi_comm.Barrier() @@ -717,7 +715,7 @@ def test_basis_ops_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=Fal r_str = mhd_ops_str.PF.T(x2_pol_str) print(f"Rank {mpi_rank} | Asserting transposed MHD operator S2.T.") - xp.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) + np.allclose(space.B2.T.dot(r_str), r_psy.toarray(True)) print(f"Rank {mpi_rank} | Assertion passed.") @@ -726,7 +724,7 @@ def assert_ops(mpi_rank, res_PSY, res_STR, verbose=False, MPI_COMM=None): TODO """ - import cunumpy as xp + from struphy.utils.arrays import xp as np if verbose: if MPI_COMM is not None: @@ -789,8 +787,8 @@ def assert_ops(mpi_rank, res_PSY, res_STR, verbose=False, MPI_COMM=None): print( f"Rank {mpi_rank} | Maximum absolute diference (result):\n", - xp.max( - xp.abs( + np.max( + np.abs( res_PSY[ res_PSY.starts[0] : res_PSY.ends[0] + 1, res_PSY.starts[1] : res_PSY.ends[1] + 1, @@ -800,8 +798,8 @@ def assert_ops(mpi_rank, res_PSY, res_STR, verbose=False, MPI_COMM=None): res_PSY.starts[0] : res_PSY.ends[0] + 1, res_PSY.starts[1] : res_PSY.ends[1] + 1, res_PSY.starts[2] : res_PSY.ends[2] + 1, - ], - ), + ] + ) ), ) @@ -809,7 +807,7 @@ def assert_ops(mpi_rank, res_PSY, res_STR, 
verbose=False, MPI_COMM=None): MPI_COMM.Barrier() # Compare results. (Works only for Nel=[N, N, N] so far! TODO: Find this bug!) - assert xp.allclose( + assert np.allclose( res_PSY[ res_PSY.starts[0] : res_PSY.ends[0] + 1, res_PSY.starts[1] : res_PSY.ends[1] + 1, @@ -834,10 +832,5 @@ def assert_ops(mpi_rank, res_PSY, res_STR, verbose=False, MPI_COMM=None): # mapping=["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}], # ) test_basis_ops_polar( - [6, 9, 7], - [2, 2, 3], - [False, True, True], - None, - ["IGAPolarCylinder", {"a": 1.0, "Lz": 3.0}], - False, + [6, 9, 7], [2, 2, 3], [False, True, True], None, ["IGAPolarCylinder", {"a": 1.0, "Lz": 3.0}], False ) diff --git a/src/struphy/feec/tests/test_derham.py b/src/struphy/feec/tests/test_derham.py index 1e857b5a2..e5cf181c9 100644 --- a/src/struphy/feec/tests/test_derham.py +++ b/src/struphy/feec/tests/test_derham.py @@ -7,7 +7,6 @@ def test_psydac_derham(Nel, p, spl_kind): """Remark: p=even projectors yield slightly different results, pass with atol=1e-3.""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilVector @@ -15,6 +14,7 @@ def test_psydac_derham(Nel, p, spl_kind): from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space from struphy.feec.psydac_derham import Derham from struphy.feec.utilities import compare_arrays + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -47,11 +47,11 @@ def test_psydac_derham(Nel, p, spl_kind): N3_tot = DR_STR.Ntot_3form # Random vectors for testing - xp.random.seed(1981) - x0 = xp.random.rand(N0_tot) - x1 = xp.random.rand(xp.sum(N1_tot)) - x2 = xp.random.rand(xp.sum(N2_tot)) - x3 = xp.random.rand(N3_tot) + np.random.seed(1981) + x0 = np.random.rand(N0_tot) + x1 = np.random.rand(np.sum(N1_tot)) + x2 = np.random.rand(np.sum(N2_tot)) + x3 = np.random.rand(N3_tot) ############################ ### TEST STENCIL VECTORS ### @@ -70,9 +70,7 @@ def test_psydac_derham(Nel, p, spl_kind): # Assign from start to end index + 1 x0_PSY[s0[0] : e0[0] + 1, s0[1] : e0[1] + 1, s0[2] : e0[2] + 1] = DR_STR.extract_0(x0)[ - s0[0] : e0[0] + 1, - s0[1] : e0[1] + 1, - s0[2] : e0[2] + 1, + s0[0] : e0[0] + 1, s0[1] : e0[1] + 1, s0[2] : e0[2] + 1 ] # Block of StencilVecttors @@ -89,19 +87,13 @@ def test_psydac_derham(Nel, p, spl_kind): x11, x12, x13 = DR_STR.extract_1(x1) x1_PSY[0][s11[0] : e11[0] + 1, s11[1] : e11[1] + 1, s11[2] : e11[2] + 1] = x11[ - s11[0] : e11[0] + 1, - s11[1] : e11[1] + 1, - s11[2] : e11[2] + 1, + s11[0] : e11[0] + 1, s11[1] : e11[1] + 1, s11[2] : e11[2] + 1 ] x1_PSY[1][s12[0] : e12[0] + 1, s12[1] : e12[1] + 1, s12[2] : e12[2] + 1] = x12[ - s12[0] : e12[0] + 1, - s12[1] : e12[1] + 1, - s12[2] : e12[2] + 1, + s12[0] : e12[0] + 1, s12[1] : e12[1] + 1, s12[2] : e12[2] + 1 ] x1_PSY[2][s13[0] : e13[0] + 1, s13[1] : e13[1] + 1, s13[2] : e13[2] + 1] = x13[ - s13[0] : e13[0] + 1, - s13[1] : e13[1] + 1, - s13[2] : e13[2] + 1, + s13[0] : e13[0] + 1, s13[1] : e13[1] + 1, s13[2] : e13[2] + 1 ] x2_PSY = BlockVector(derham.Vh["2"]) @@ -117,19 +109,13 @@ def test_psydac_derham(Nel, p, spl_kind): x21, x22, x23 = DR_STR.extract_2(x2) x2_PSY[0][s21[0] : e21[0] + 1, s21[1] : e21[1] + 1, s21[2] : e21[2] + 1] = x21[ - s21[0] : e21[0] + 1, - s21[1] : e21[1] + 1, - s21[2] : e21[2] + 1, + s21[0] : e21[0] + 1, s21[1] : e21[1] + 1, s21[2] : e21[2] + 1 ] x2_PSY[1][s22[0] : e22[0] + 1, s22[1] : e22[1] + 1, s22[2] : e22[2] + 1] = x22[ - s22[0] : 
e22[0] + 1, - s22[1] : e22[1] + 1, - s22[2] : e22[2] + 1, + s22[0] : e22[0] + 1, s22[1] : e22[1] + 1, s22[2] : e22[2] + 1 ] x2_PSY[2][s23[0] : e23[0] + 1, s23[1] : e23[1] + 1, s23[2] : e23[2] + 1] = x23[ - s23[0] : e23[0] + 1, - s23[1] : e23[1] + 1, - s23[2] : e23[2] + 1, + s23[0] : e23[0] + 1, s23[1] : e23[1] + 1, s23[2] : e23[2] + 1 ] x3_PSY = StencilVector(derham.Vh["3"]) @@ -144,9 +130,7 @@ def test_psydac_derham(Nel, p, spl_kind): e3 = x3_PSY.ends x3_PSY[s3[0] : e3[0] + 1, s3[1] : e3[1] + 1, s3[2] : e3[2] + 1] = DR_STR.extract_3(x3)[ - s3[0] : e3[0] + 1, - s3[1] : e3[1] + 1, - s3[2] : e3[2] + 1, + s3[0] : e3[0] + 1, s3[1] : e3[1] + 1, s3[2] : e3[2] + 1 ] ######################## @@ -190,7 +174,7 @@ def test_psydac_derham(Nel, p, spl_kind): zero2_STR = curl_STR.dot(d1_STR) zero2_PSY = derham.curl.dot(d1_PSY) - assert xp.allclose(zero2_STR, xp.zeros_like(zero2_STR)) + assert np.allclose(zero2_STR, np.zeros_like(zero2_STR)) if rank == 0: print("\nCompare curl of grad:") compare_arrays(zero2_PSY, DR_STR.extract_2(zero2_STR), rank) @@ -199,7 +183,7 @@ def test_psydac_derham(Nel, p, spl_kind): zero3_STR = div_STR.dot(d2_STR) zero3_PSY = derham.div.dot(d2_PSY) - assert xp.allclose(zero3_STR, xp.zeros_like(zero3_STR)) + assert np.allclose(zero3_STR, np.zeros_like(zero3_STR)) if rank == 0: print("\nCompare div of curl:") compare_arrays(zero3_PSY, DR_STR.extract_3(zero3_STR), rank) @@ -217,7 +201,7 @@ def test_psydac_derham(Nel, p, spl_kind): # compare projectors def f(eta1, eta2, eta3): - return xp.sin(4 * xp.pi * eta1) * xp.cos(2 * xp.pi * eta2) + xp.exp(xp.cos(2 * xp.pi * eta3)) + return np.sin(4 * np.pi * eta1) * np.cos(2 * np.pi * eta2) + np.exp(np.cos(2 * np.pi * eta3)) fh0_STR = PI("0", f) fh0_PSY = derham.P["0"](f) diff --git a/src/struphy/feec/tests/test_eval_field.py b/src/struphy/feec/tests/test_eval_field.py index f9a00c18d..077b0f158 100644 --- a/src/struphy/feec/tests/test_eval_field.py +++ b/src/struphy/feec/tests/test_eval_field.py @@ -1,8 +1,9 @@ -import cunumpy as xp import pytest from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI +from struphy.utils.arrays import xp as np + @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[3, 2, 4]]) @@ -14,7 +15,6 @@ def test_eval_field(Nel, p, spl_kind): from struphy.feec.psydac_derham import Derham from struphy.feec.utilities import compare_arrays from struphy.geometry.base import Domain - from struphy.initial import perturbations comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -29,57 +29,84 @@ def test_eval_field(Nel, p, spl_kind): n3 = derham.create_spline_function("density", "L2") uv = derham.create_spline_function("velocity", "H1vec") - # initialize with sin/cos perturbations - pert_p0 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,)) - - pert_E1_1 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="1", comp=0) - pert_E1_2 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="1", comp=1) - pert_E1_3 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="1", comp=2) - - pert_B2_1 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="2", comp=0) - pert_B2_2 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="2", comp=1) - pert_B2_3 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="2", comp=2) + # initialize fields as forms + comps = { + "pressure": "0", + "e_field": ["1", "1", "1"], + "b_field": ["2", "2", 
"2"], + "density": "3", + "velocity": ["v", "v", "v"], + } - pert_n3 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,)) - - pert_uv_1 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="v", comp=0) - pert_uv_2 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="v", comp=1) - pert_uv_3 = perturbations.ModesCos(ls=(0,), ms=(0,), ns=(1,), amps=(5.0,), given_in_basis="v", comp=2) - - p0.initialize_coeffs(perturbations=pert_p0) - E1.initialize_coeffs(perturbations=[pert_E1_1, pert_E1_2, pert_E1_3]) - B2.initialize_coeffs(perturbations=[pert_B2_1, pert_B2_2, pert_B2_3]) - n3.initialize_coeffs(perturbations=pert_n3) - uv.initialize_coeffs(perturbations=[pert_uv_1, pert_uv_2, pert_uv_3]) + # initialize with sin/cos perturbations + pert_params_p0 = {"ModesCos": {"given_in_basis": "0", "ls": [0], "ms": [0], "ns": [1], "amps": [5.0]}} + + pert_params_E1 = { + "ModesCos": { + "given_in_basis": ["1", "1", "1"], + "ls": [[0], [0], [0]], + "ms": [[0], [0], [0]], + "ns": [[1], [1], [1]], + "amps": [[5.0], [5.0], [5.0]], + } + } + + pert_params_B2 = { + "ModesCos": { + "given_in_basis": ["2", "2", "2"], + "ls": [[0], [0], [0]], + "ms": [[0], [0], [0]], + "ns": [[1], [1], [1]], + "amps": [[5.0], [5.0], [5.0]], + } + } + + pert_params_n3 = {"ModesCos": {"given_in_basis": "3", "ls": [0], "ms": [0], "ns": [1], "amps": [5.0]}} + + pert_params_uv = { + "ModesCos": { + "given_in_basis": ["v", "v", "v"], + "ls": [[0], [0], [0]], + "ms": [[0], [0], [0]], + "ns": [[1], [1], [1]], + "amps": [[5.0], [5.0], [5.0]], + } + } + + p0.initialize_coeffs(pert_params=pert_params_p0) + E1.initialize_coeffs(pert_params=pert_params_E1) + B2.initialize_coeffs(pert_params=pert_params_B2) + n3.initialize_coeffs(pert_params=pert_params_n3) + uv.initialize_coeffs(pert_params=pert_params_uv) # evaluation points for meshgrid - eta1 = xp.linspace(0, 1, 11) - eta2 = xp.linspace(0, 1, 14) - eta3 = xp.linspace(0, 1, 18) + eta1 = np.linspace(0, 1, 11) + eta2 = np.linspace(0, 1, 14) + eta3 = np.linspace(0, 1, 18) # evaluation points for markers Np = 33 - markers = xp.random.rand(Np, 3) - markers_1 = xp.zeros((eta1.size, 3)) + markers = np.random.rand(Np, 3) + markers_1 = np.zeros((eta1.size, 3)) markers_1[:, 0] = eta1 - markers_2 = xp.zeros((eta2.size, 3)) + markers_2 = np.zeros((eta2.size, 3)) markers_2[:, 1] = eta2 - markers_3 = xp.zeros((eta3.size, 3)) + markers_3 = np.zeros((eta3.size, 3)) markers_3[:, 2] = eta3 # arrays for legacy evaluation arr1, arr2, arr3, is_sparse_meshgrid = Domain.prepare_eval_pts(eta1, eta2, eta3) - tmp = xp.zeros_like(arr1) + tmp = np.zeros_like(arr1) ###### # V0 # ###### # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(p0.vector.toarray(), p0.nbasis) + coeffs_loc = np.reshape(p0.vector.toarray(), p0.nbasis) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(p0.vector, coeffs, rank) @@ -101,12 +128,12 @@ def test_eval_field(Nel, p, spl_kind): tmp, 0, ) - val_legacy = xp.squeeze(tmp.copy()) + val_legacy = np.squeeze(tmp.copy()) tmp[:] = 0 # distributed evaluation and comparison val = p0(eta1, eta2, eta3, squeeze_out=True) - assert xp.allclose(val, val_legacy) + assert np.allclose(val, val_legacy) # marker evaluation m_vals = p0(markers) @@ -119,19 +146,19 @@ def test_eval_field(Nel, p, spl_kind): m_vals_ref_2 = p0(0.0, eta2, 0.0, squeeze_out=True) m_vals_ref_3 = p0(0.0, 0.0, eta3, 
squeeze_out=True) - assert xp.allclose(m_vals_1, m_vals_ref_1) - assert xp.allclose(m_vals_2, m_vals_ref_2) - assert xp.allclose(m_vals_3, m_vals_ref_3) + assert np.allclose(m_vals_1, m_vals_ref_1) + assert np.allclose(m_vals_2, m_vals_ref_2) + assert np.allclose(m_vals_3, m_vals_ref_3) ###### # V1 # ###### # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(E1.vector[0].toarray(), E1.nbasis[0]) + coeffs_loc = np.reshape(E1.vector[0].toarray(), E1.nbasis[0]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(E1.vector[0], coeffs, rank) @@ -153,15 +180,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 11, ) - val_legacy_1 = xp.squeeze(tmp.copy()) + val_legacy_1 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(E1.vector[1].toarray(), E1.nbasis[1]) + coeffs_loc = np.reshape(E1.vector[1].toarray(), E1.nbasis[1]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(E1.vector[1], coeffs, rank) @@ -183,15 +210,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 12, ) - val_legacy_2 = xp.squeeze(tmp.copy()) + val_legacy_2 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(E1.vector[2].toarray(), E1.nbasis[2]) + coeffs_loc = np.reshape(E1.vector[2].toarray(), E1.nbasis[2]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(E1.vector[2], coeffs, rank) @@ -213,14 +240,14 @@ def test_eval_field(Nel, p, spl_kind): tmp, 13, ) - val_legacy_3 = xp.squeeze(tmp.copy()) + val_legacy_3 = np.squeeze(tmp.copy()) tmp[:] = 0 # distributed evaluation and comparison val1, val2, val3 = E1(eta1, eta2, eta3, squeeze_out=True) - assert xp.allclose(val1, val_legacy_1) - assert xp.allclose(val2, val_legacy_2) - assert xp.allclose(val3, val_legacy_3) + assert np.allclose(val1, val_legacy_1) + assert np.allclose(val2, val_legacy_2) + assert np.allclose(val3, val_legacy_3) # marker evaluation m_vals = E1(markers) @@ -233,25 +260,25 @@ def test_eval_field(Nel, p, spl_kind): m_vals_ref_2 = E1(0.0, eta2, 0.0, squeeze_out=True) m_vals_ref_3 = E1(0.0, 0.0, eta3, squeeze_out=True) - assert xp.all( - [xp.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)], + assert np.all( + [np.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)] ) - assert xp.all( - [xp.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)], + assert np.all( + [np.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)] ) - assert xp.all( - [xp.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)], + assert np.all( + [np.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)] ) ###### # V2 # ###### # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(B2.vector[0].toarray(), B2.nbasis[0]) + coeffs_loc = np.reshape(B2.vector[0].toarray(), B2.nbasis[0]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs 
= np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(B2.vector[0], coeffs, rank) @@ -273,15 +300,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 21, ) - val_legacy_1 = xp.squeeze(tmp.copy()) + val_legacy_1 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(B2.vector[1].toarray(), B2.nbasis[1]) + coeffs_loc = np.reshape(B2.vector[1].toarray(), B2.nbasis[1]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(B2.vector[1], coeffs, rank) @@ -303,15 +330,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 22, ) - val_legacy_2 = xp.squeeze(tmp.copy()) + val_legacy_2 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(B2.vector[2].toarray(), B2.nbasis[2]) + coeffs_loc = np.reshape(B2.vector[2].toarray(), B2.nbasis[2]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(B2.vector[2], coeffs, rank) @@ -333,14 +360,14 @@ def test_eval_field(Nel, p, spl_kind): tmp, 23, ) - val_legacy_3 = xp.squeeze(tmp.copy()) + val_legacy_3 = np.squeeze(tmp.copy()) tmp[:] = 0 # distributed evaluation and comparison val1, val2, val3 = B2(eta1, eta2, eta3, squeeze_out=True) - assert xp.allclose(val1, val_legacy_1) - assert xp.allclose(val2, val_legacy_2) - assert xp.allclose(val3, val_legacy_3) + assert np.allclose(val1, val_legacy_1) + assert np.allclose(val2, val_legacy_2) + assert np.allclose(val3, val_legacy_3) # marker evaluation m_vals = B2(markers) @@ -353,25 +380,25 @@ def test_eval_field(Nel, p, spl_kind): m_vals_ref_2 = B2(0.0, eta2, 0.0, squeeze_out=True) m_vals_ref_3 = B2(0.0, 0.0, eta3, squeeze_out=True) - assert xp.all( - [xp.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)], + assert np.all( + [np.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)] ) - assert xp.all( - [xp.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)], + assert np.all( + [np.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)] ) - assert xp.all( - [xp.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)], + assert np.all( + [np.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)] ) ###### # V3 # ###### # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(n3.vector.toarray(), n3.nbasis) + coeffs_loc = np.reshape(n3.vector.toarray(), n3.nbasis) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(n3.vector, coeffs, rank) @@ -393,12 +420,12 @@ def test_eval_field(Nel, p, spl_kind): tmp, 3, ) - val_legacy = xp.squeeze(tmp.copy()) + val_legacy = np.squeeze(tmp.copy()) tmp[:] = 0 # distributed evaluation and comparison val = n3(eta1, eta2, eta3, squeeze_out=True) - assert xp.allclose(val, val_legacy) + assert np.allclose(val, val_legacy) # marker evaluation m_vals = n3(markers) @@ -411,19 +438,19 @@ def test_eval_field(Nel, p, spl_kind): m_vals_ref_2 = n3(0.0, eta2, 0.0, squeeze_out=True) 
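# NOTE: marker evaluation passes a single (Np, 3) array whose rows are logical points
# (eta1, eta2, eta3) and returns one value per row, in contrast to the tensor-product
# grid evaluation n3(eta1, eta2, eta3). A minimal sketch, reusing eta1 and the
# SplineFunction n3 from this test:
#
#     pts = np.zeros((eta1.size, 3))  # each row: (eta1_i, 0.0, 0.0)
#     pts[:, 0] = eta1
#     vals = n3(pts)                  # matches n3(eta1, 0.0, 0.0, squeeze_out=True)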
m_vals_ref_3 = n3(0.0, 0.0, eta3, squeeze_out=True) - assert xp.allclose(m_vals_1, m_vals_ref_1) - assert xp.allclose(m_vals_2, m_vals_ref_2) - assert xp.allclose(m_vals_3, m_vals_ref_3) + assert np.allclose(m_vals_1, m_vals_ref_1) + assert np.allclose(m_vals_2, m_vals_ref_2) + assert np.allclose(m_vals_3, m_vals_ref_3) ######### # V0vec # ######### # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(uv.vector[0].toarray(), uv.nbasis[0]) + coeffs_loc = np.reshape(uv.vector[0].toarray(), uv.nbasis[0]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(uv.vector[0], coeffs, rank) @@ -445,15 +472,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 0, ) - val_legacy_1 = xp.squeeze(tmp.copy()) + val_legacy_1 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(uv.vector[1].toarray(), uv.nbasis[1]) + coeffs_loc = np.reshape(uv.vector[1].toarray(), uv.nbasis[1]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(uv.vector[1], coeffs, rank) @@ -475,15 +502,15 @@ def test_eval_field(Nel, p, spl_kind): tmp, 0, ) - val_legacy_2 = xp.squeeze(tmp.copy()) + val_legacy_2 = np.squeeze(tmp.copy()) tmp[:] = 0 # create legacy arrays with same coeffs - coeffs_loc = xp.reshape(uv.vector[2].toarray(), uv.nbasis[2]) + coeffs_loc = np.reshape(uv.vector[2].toarray(), uv.nbasis[2]) if isinstance(comm, MockComm): coeffs = coeffs_loc else: - coeffs = xp.zeros_like(coeffs_loc) + coeffs = np.zeros_like(coeffs_loc) comm.Allreduce(coeffs_loc, coeffs, op=MPI.SUM) compare_arrays(uv.vector[2], coeffs, rank) @@ -505,14 +532,14 @@ def test_eval_field(Nel, p, spl_kind): tmp, 0, ) - val_legacy_3 = xp.squeeze(tmp.copy()) + val_legacy_3 = np.squeeze(tmp.copy()) tmp[:] = 0 # distributed evaluation and comparison val1, val2, val3 = uv(eta1, eta2, eta3, squeeze_out=True) - assert xp.allclose(val1, val_legacy_1) - assert xp.allclose(val2, val_legacy_2) - assert xp.allclose(val3, val_legacy_3) + assert np.allclose(val1, val_legacy_1) + assert np.allclose(val2, val_legacy_2) + assert np.allclose(val3, val_legacy_3) # marker evaluation m_vals = uv(markers) @@ -525,18 +552,16 @@ def test_eval_field(Nel, p, spl_kind): m_vals_ref_2 = uv(0.0, eta2, 0.0, squeeze_out=True) m_vals_ref_3 = uv(0.0, 0.0, eta3, squeeze_out=True) - assert xp.all( - [xp.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)], + assert np.all( + [np.allclose(m_vals_1_i, m_vals_ref_1_i) for m_vals_1_i, m_vals_ref_1_i in zip(m_vals_1, m_vals_ref_1)] ) - assert xp.all( - [xp.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)], + assert np.all( + [np.allclose(m_vals_2_i, m_vals_ref_2_i) for m_vals_2_i, m_vals_ref_2_i in zip(m_vals_2, m_vals_ref_2)] ) - assert xp.all( - [xp.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)], + assert np.all( + [np.allclose(m_vals_3_i, m_vals_ref_3_i) for m_vals_3_i, m_vals_ref_3_i in zip(m_vals_3, m_vals_ref_3)] ) - print("\nAll assertions passed.") - if __name__ == "__main__": test_eval_field([8, 9, 10], [3, 2, 4], [False, False, True]) diff --git a/src/struphy/feec/tests/test_field_init.py b/src/struphy/feec/tests/test_field_init.py index 2f0da1611..9bcab94fd 
100644 --- a/src/struphy/feec/tests/test_field_init.py +++ b/src/struphy/feec/tests/test_field_init.py @@ -9,11 +9,10 @@ def test_bckgr_init_const(Nel, p, spl_kind, spaces, vec_comps): """Test field background initialization of "LogicalConst" with multiple fields in params.""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham - from struphy.io.options import FieldsBackground + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -22,14 +21,14 @@ def test_bckgr_init_const(Nel, p, spl_kind, spaces, vec_comps): derham = Derham(Nel, p, spl_kind, comm=comm) # evaluation grids for comparisons - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) - meshgrids = xp.meshgrid(e1, e2, e3, indexing="ij") + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) + meshgrids = np.meshgrid(e1, e2, e3, indexing="ij") # test values - xp.random.seed(1234) - val = xp.random.rand() + np.random.seed(1234) + val = np.random.rand() if val > 0.5: val = int(val * 10) @@ -37,23 +36,23 @@ def test_bckgr_init_const(Nel, p, spl_kind, spaces, vec_comps): for i, space in enumerate(spaces): field = derham.create_spline_function("name_" + str(i), space) if space in ("H1", "L2"): - background = FieldsBackground(type="LogicalConst", values=(val,)) - field.initialize_coeffs(backgrounds=background) + bckgr_params = {"LogicalConst": {"values": val}} + field.initialize_coeffs(bckgr_params=bckgr_params) print( - f"\n{rank =}, {space =}, after init:\n {xp.max(xp.abs(field(*meshgrids) - val)) =}", + f"\n{rank = }, {space = }, after init:\n {np.max(np.abs(field(*meshgrids) - val)) = }", ) # print(f'{field(*meshgrids) = }') - assert xp.allclose(field(*meshgrids), val) + assert np.allclose(field(*meshgrids), val) else: - background = FieldsBackground(type="LogicalConst", values=(val, None, val)) - field.initialize_coeffs(backgrounds=background) - for j, val in enumerate(background.values): - if val is not None: + bckgr_params = {"LogicalConst": {"values": [val, None, val]}} + field.initialize_coeffs(bckgr_params=bckgr_params) + for j in range(3): + if bckgr_params["LogicalConst"]["values"][j]: print( - f"\n{rank =}, {space =}, after init:\n {j =}, {xp.max(xp.abs(field(*meshgrids)[j] - val)) =}", + f"\n{rank = }, {space = }, after init:\n {j = }, {np.max(np.abs(field(*meshgrids)[j] - val)) = }", ) # print(f'{field(*meshgrids)[i] = }') - assert xp.allclose(field(*meshgrids)[j], val) + assert np.allclose(field(*meshgrids)[j], val) @pytest.mark.parametrize("Nel", [[18, 24, 12]]) @@ -64,15 +63,14 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show import inspect - import cunumpy as xp from matplotlib import pyplot as plt from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.fields_background import equils - from struphy.fields_background.base import FluidEquilibrium, FluidEquilibriumWithB + from struphy.fields_background.base import FluidEquilibriumWithB from struphy.geometry import domains - from struphy.io.options import FieldsBackground + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -81,35 +79,32 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show derham = Derham(Nel, p, spl_kind, comm=comm) # background parameters - bckgr_0 = FieldsBackground(type="FluidEquilibrium", variable="absB0") - 
bckgr_1 = FieldsBackground(type="FluidEquilibrium", variable="u1") - bckgr_2 = FieldsBackground(type="FluidEquilibrium", variable="u2") - bckgr_3 = FieldsBackground(type="FluidEquilibrium", variable="p3") - bckgr_4 = FieldsBackground(type="FluidEquilibrium", variable="uv") + bckgr_params_0 = {"MHD": {"variable": "absB0"}} + bckgr_params_1 = {"MHD": {"variable": "u1"}} + bckgr_params_2 = {"MHD": {"variable": "u2"}} + bckgr_params_3 = {"MHD": {"variable": "p3"}} + bckgr_params_4 = {"MHD": {"variable": "uv"}} # evaluation grids for comparisons - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) - meshgrids = xp.meshgrid(e1, e2, e3, indexing="ij") + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) + meshgrids = np.meshgrid(e1, e2, e3, indexing="ij") # test for key, val in inspect.getmembers(equils): if inspect.isclass(val) and val.__module__ == equils.__name__: - print(f"{key =}") + print(f"{key = }") if "DESC" in key and not with_desc: - print(f"Attention: {with_desc =}, DESC not tested here !!") + print(f"Attention: {with_desc = }, DESC not tested here !!") continue if "GVEC" in key and not with_gvec: - print(f"Attention: {with_gvec =}, GVEC not tested here !!") + print(f"Attention: {with_gvec = }, GVEC not tested here !!") continue mhd_equil = val() - if not isinstance(mhd_equil, FluidEquilibriumWithB): - continue - - print(f"{mhd_equil.params =}") + print(f"{mhd_equil.params = }") if "AdhocTorus" in key: mhd_equil.domain = domains.HollowTorus( @@ -132,8 +127,8 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show elif "ShearedSlab" in key: mhd_equil.domain = domains.Cuboid( r1=mhd_equil.params["a"], - r2=mhd_equil.params["a"] * 2 * xp.pi, - r3=mhd_equil.params["R0"] * 2 * xp.pi, + r2=mhd_equil.params["a"] * 2 * np.pi, + r3=mhd_equil.params["R0"] * 2 * np.pi, ) elif "ShearFluid" in key: mhd_equil.domain = domains.Cuboid( @@ -145,7 +140,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show mhd_equil.domain = domains.HollowCylinder( a1=1e-3, a2=mhd_equil.params["a"], - Lz=mhd_equil.params["R0"] * 2 * xp.pi, + Lz=mhd_equil.params["R0"] * 2 * np.pi, ) else: try: @@ -156,87 +151,93 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show field_0 = derham.create_spline_function( "name_0", "H1", - backgrounds=bckgr_0, - equil=mhd_equil, + bckgr_params=bckgr_params_0, ) field_1 = derham.create_spline_function( "name_1", "Hcurl", - backgrounds=bckgr_1, - equil=mhd_equil, + bckgr_params=bckgr_params_1, ) field_2 = derham.create_spline_function( "name_2", "Hdiv", - backgrounds=bckgr_2, - equil=mhd_equil, + bckgr_params=bckgr_params_2, ) field_3 = derham.create_spline_function( "name_3", "L2", - backgrounds=bckgr_3, - equil=mhd_equil, + bckgr_params=bckgr_params_3, ) field_4 = derham.create_spline_function( "name_4", "H1vec", - backgrounds=bckgr_4, - equil=mhd_equil, + bckgr_params=bckgr_params_4, ) + field_1.initialize_coeffs(bckgr_obj=mhd_equil) + print("field_1 initialized.") + field_2.initialize_coeffs(bckgr_obj=mhd_equil) + print("field_2 initialized.") + field_3.initialize_coeffs(bckgr_obj=mhd_equil) + print("field_3 initialized.") + field_4.initialize_coeffs(bckgr_obj=mhd_equil) + print("field_4 initialized.") + # scalar spaces print( - f"{xp.max(xp.abs(field_3(*meshgrids) - mhd_equil.p3(*meshgrids))) / xp.max(xp.abs(mhd_equil.p3(*meshgrids)))}", + f"{np.max(np.abs(field_3(*meshgrids) - 
mhd_equil.p3(*meshgrids))) / np.max(np.abs(mhd_equil.p3(*meshgrids)))}" ) assert ( - xp.max( - xp.abs(field_3(*meshgrids) - mhd_equil.p3(*meshgrids)), + np.max( + np.abs(field_3(*meshgrids) - mhd_equil.p3(*meshgrids)), ) - / xp.max(xp.abs(mhd_equil.p3(*meshgrids))) + / np.max(np.abs(mhd_equil.p3(*meshgrids))) < 0.54 ) if isinstance(mhd_equil, FluidEquilibriumWithB): + field_0.initialize_coeffs(bckgr_obj=mhd_equil) + print("field_0 initialized.") print( - f"{xp.max(xp.abs(field_0(*meshgrids) - mhd_equil.absB0(*meshgrids))) / xp.max(xp.abs(mhd_equil.absB0(*meshgrids)))}", + f"{np.max(np.abs(field_0(*meshgrids) - mhd_equil.absB0(*meshgrids))) / np.max(np.abs(mhd_equil.absB0(*meshgrids)))}" ) assert ( - xp.max( - xp.abs(field_0(*meshgrids) - mhd_equil.absB0(*meshgrids)), + np.max( + np.abs(field_0(*meshgrids) - mhd_equil.absB0(*meshgrids)), ) - / xp.max(xp.abs(mhd_equil.absB0(*meshgrids))) + / np.max(np.abs(mhd_equil.absB0(*meshgrids))) < 0.057 ) print("Scalar asserts passed.") # vector-valued spaces ref = mhd_equil.u1(*meshgrids) - if xp.max(xp.abs(ref[0])) < 1e-11: + if np.max(np.abs(ref[0])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[0])) + denom = np.max(np.abs(ref[0])) print( - f"{xp.max(xp.abs(field_1(*meshgrids)[0] - ref[0])) / denom =}", + f"{np.max(np.abs(field_1(*meshgrids)[0] - ref[0])) / denom = }", ) - assert xp.max(xp.abs(field_1(*meshgrids)[0] - ref[0])) / denom < 0.28 - if xp.max(xp.abs(ref[1])) < 1e-11: + assert np.max(np.abs(field_1(*meshgrids)[0] - ref[0])) / denom < 0.28 + if np.max(np.abs(ref[1])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[1])) + denom = np.max(np.abs(ref[1])) print( - f"{xp.max(xp.abs(field_1(*meshgrids)[1] - ref[1])) / denom =}", + f"{np.max(np.abs(field_1(*meshgrids)[1] - ref[1])) / denom = }", ) - assert xp.max(xp.abs(field_1(*meshgrids)[1] - ref[1])) / denom < 0.33 - if xp.max(xp.abs(ref[2])) < 1e-11: + assert np.max(np.abs(field_1(*meshgrids)[1] - ref[1])) / denom < 0.33 + if np.max(np.abs(ref[2])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[2])) + denom = np.max(np.abs(ref[2])) print( - f"{xp.max(xp.abs(field_1(*meshgrids)[2] - ref[2])) / denom =}", + f"{np.max(np.abs(field_1(*meshgrids)[2] - ref[2])) / denom = }", ) assert ( - xp.max( - xp.abs( + np.max( + np.abs( field_1(*meshgrids)[2] - ref[2], ), ) @@ -246,75 +247,75 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show print("u1 asserts passed.") ref = mhd_equil.u2(*meshgrids) - if xp.max(xp.abs(ref[0])) < 1e-11: + if np.max(np.abs(ref[0])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[0])) + denom = np.max(np.abs(ref[0])) print( - f"{xp.max(xp.abs(field_2(*meshgrids)[0] - ref[0])) / denom =}", + f"{np.max(np.abs(field_2(*meshgrids)[0] - ref[0])) / denom = }", ) - assert xp.max(xp.abs(field_2(*meshgrids)[0] - ref[0])) / denom < 0.86 - if xp.max(xp.abs(ref[1])) < 1e-11: + assert np.max(np.abs(field_2(*meshgrids)[0] - ref[0])) / denom < 0.86 + if np.max(np.abs(ref[1])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[1])) + denom = np.max(np.abs(ref[1])) print( - f"{xp.max(xp.abs(field_2(*meshgrids)[1] - ref[1])) / denom =}", + f"{np.max(np.abs(field_2(*meshgrids)[1] - ref[1])) / denom = }", ) assert ( - xp.max( - xp.abs( + np.max( + np.abs( field_2(*meshgrids)[1] - ref[1], ), ) / denom < 0.4 ) - if xp.max(xp.abs(ref[2])) < 1e-11: + if np.max(np.abs(ref[2])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[2])) + denom = np.max(np.abs(ref[2])) print( - f"{xp.max(xp.abs(field_2(*meshgrids)[2] - ref[2])) / denom 
=}", + f"{np.max(np.abs(field_2(*meshgrids)[2] - ref[2])) / denom = }", ) - assert xp.max(xp.abs(field_2(*meshgrids)[2] - ref[2])) / denom < 0.21 + assert np.max(np.abs(field_2(*meshgrids)[2] - ref[2])) / denom < 0.21 print("u2 asserts passed.") ref = mhd_equil.uv(*meshgrids) - if xp.max(xp.abs(ref[0])) < 1e-11: + if np.max(np.abs(ref[0])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[0])) + denom = np.max(np.abs(ref[0])) print( - f"{xp.max(xp.abs(field_4(*meshgrids)[0] - ref[0])) / denom =}", + f"{np.max(np.abs(field_4(*meshgrids)[0] - ref[0])) / denom = }", ) - assert xp.max(xp.abs(field_4(*meshgrids)[0] - ref[0])) / denom < 0.6 - if xp.max(xp.abs(ref[1])) < 1e-11: + assert np.max(np.abs(field_4(*meshgrids)[0] - ref[0])) / denom < 0.6 + if np.max(np.abs(ref[1])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[1])) + denom = np.max(np.abs(ref[1])) print( - f"{xp.max(xp.abs(field_4(*meshgrids)[1] - ref[1])) / denom =}", + f"{np.max(np.abs(field_4(*meshgrids)[1] - ref[1])) / denom = }", ) assert ( - xp.max( - xp.abs( + np.max( + np.abs( field_4(*meshgrids)[1] - ref[1], ), ) / denom < 0.2 ) - if xp.max(xp.abs(ref[2])) < 1e-11: + if np.max(np.abs(ref[2])) < 1e-11: denom = 1.0 else: - denom = xp.max(xp.abs(ref[2])) + denom = np.max(np.abs(ref[2])) print( - f"{xp.max(xp.abs(field_4(*meshgrids)[2] - ref[2])) / denom =}", + f"{np.max(np.abs(field_4(*meshgrids)[2] - ref[2])) / denom = }", ) assert ( - xp.max( - xp.abs( + np.max( + np.abs( field_4(*meshgrids)[2] - ref[2], ), ) @@ -325,27 +326,27 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show # plotting fields with equilibrium if show_plot and rank == 0: - plt.figure(f"0/3-forms top, {mhd_equil =}", figsize=(24, 16)) + plt.figure(f"0/3-forms top, {mhd_equil = }", figsize=(24, 16)) plt.figure( - f"0/3-forms poloidal, {mhd_equil =}", + f"0/3-forms poloidal, {mhd_equil = }", figsize=(24, 16), ) - plt.figure(f"1-forms top, {mhd_equil =}", figsize=(24, 16)) + plt.figure(f"1-forms top, {mhd_equil = }", figsize=(24, 16)) plt.figure( - f"1-forms poloidal, {mhd_equil =}", + f"1-forms poloidal, {mhd_equil = }", figsize=(24, 16), ) - plt.figure(f"2-forms top, {mhd_equil =}", figsize=(24, 16)) + plt.figure(f"2-forms top, {mhd_equil = }", figsize=(24, 16)) plt.figure( - f"2-forms poloidal, {mhd_equil =}", + f"2-forms poloidal, {mhd_equil = }", figsize=(24, 16), ) plt.figure( - f"vector-fields top, {mhd_equil =}", + f"vector-fields top, {mhd_equil = }", figsize=(24, 16), ) plt.figure( - f"vector-fields poloidal, {mhd_equil =}", + f"vector-fields poloidal, {mhd_equil = }", figsize=(24, 16), ) x, y, z = mhd_equil.domain(*meshgrids) @@ -355,9 +356,9 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show absB0_h = mhd_equil.domain.push(field_0, *meshgrids) absB0 = mhd_equil.domain.push(mhd_equil.absB0, *meshgrids) - levels = xp.linspace(xp.min(absB0) - 1e-10, xp.max(absB0), 20) + levels = np.linspace(np.min(absB0) - 1e-10, np.max(absB0), 20) - plt.figure(f"0/3-forms top, {mhd_equil =}") + plt.figure(f"0/3-forms top, {mhd_equil = }") plt.subplot(2, 3, 1) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -443,7 +444,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show plt.colorbar() plt.title("reference, top view (e1-e3)") - plt.figure(f"0/3-forms poloidal, {mhd_equil =}") + plt.figure(f"0/3-forms poloidal, {mhd_equil = }") plt.subplot(2, 3, 1) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -493,9 +494,9 @@ def test_bckgr_init_mhd(Nel, p, 
spl_kind, with_desc=False, with_gvec=False, show p3_h = mhd_equil.domain.push(field_3, *meshgrids) p3 = mhd_equil.domain.push(mhd_equil.p3, *meshgrids) - levels = xp.linspace(xp.min(p3) - 1e-10, xp.max(p3), 20) + levels = np.linspace(np.min(p3) - 1e-10, np.max(p3), 20) - plt.figure(f"0/3-forms top, {mhd_equil =}") + plt.figure(f"0/3-forms top, {mhd_equil = }") plt.subplot(2, 3, 2) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -581,7 +582,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show plt.colorbar() plt.title("reference, top view (e1-e3)") - plt.figure(f"0/3-forms poloidal, {mhd_equil =}") + plt.figure(f"0/3-forms poloidal, {mhd_equil = }") plt.subplot(2, 3, 2) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -640,9 +641,9 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show ) for i, (bh, b) in enumerate(zip(b1h, b1)): - levels = xp.linspace(xp.min(b) - 1e-10, xp.max(b), 20) + levels = np.linspace(np.min(b) - 1e-10, np.max(b), 20) - plt.figure(f"1-forms top, {mhd_equil =}") + plt.figure(f"1-forms top, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -728,7 +729,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show plt.colorbar() plt.title("reference, top view (e1-e3)") - plt.figure(f"1-forms poloidal, {mhd_equil =}") + plt.figure(f"1-forms poloidal, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -789,9 +790,9 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show ) for i, (bh, b) in enumerate(zip(b2h, b2)): - levels = xp.linspace(xp.min(b) - 1e-10, xp.max(b), 20) + levels = np.linspace(np.min(b) - 1e-10, np.max(b), 20) - plt.figure(f"2-forms top, {mhd_equil =}") + plt.figure(f"2-forms top, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -877,7 +878,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show plt.colorbar() plt.title("reference, top view (e1-e3)") - plt.figure(f"2-forms poloidal, {mhd_equil =}") + plt.figure(f"2-forms poloidal, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -938,9 +939,9 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show ) for i, (bh, b) in enumerate(zip(bvh, bv)): - levels = xp.linspace(xp.min(b) - 1e-10, xp.max(b), 20) + levels = np.linspace(np.min(b) - 1e-10, np.max(b), 20) - plt.figure(f"vector-fields top, {mhd_equil =}") + plt.figure(f"vector-fields top, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -1026,7 +1027,7 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show plt.colorbar() plt.title("reference, top view (e1-e3)") - plt.figure(f"vector-fields poloidal, {mhd_equil =}") + plt.figure(f"vector-fields poloidal, {mhd_equil = }") plt.subplot(2, 3, 1 + i) if "Slab" in key or "Pinch" in key: plt.contourf( @@ -1083,40 +1084,34 @@ def test_bckgr_init_mhd(Nel, p, spl_kind, with_desc=False, with_gvec=False, show def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): """Test field perturbation with ModesSin + ModesCos on top of of "LogicalConst" with multiple fields in params.""" - import cunumpy as xp from matplotlib import pyplot as plt from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.initial.perturbations import ModesCos, ModesSin - from 
struphy.io.options import FieldsBackground + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() # background parameters - avg_0 = (1.2,) - avg_1 = (0.0, 2.6, 3.7) - avg_2 = (2, 3, 4.2) + avg_0 = 1.2 + avg_1 = [None, 2.6, 3.7] + avg_2 = [2, 3, 4.2] - bckgr_0 = FieldsBackground(type="LogicalConst", values=avg_0) - bckgr_1 = FieldsBackground(type="LogicalConst", values=avg_1) - bckgr_2 = FieldsBackground(type="LogicalConst", values=avg_2) + bckgr_params_0 = {"LogicalConst": {"values": avg_0}} + bckgr_params_1 = {"LogicalConst": {"values": avg_1}} + bckgr_params_2 = {"LogicalConst": {"values": avg_2}} # perturbations ms_s = [0, 2] ns_s = [1, 1] amps = [0.2] - f_sin_0 = ModesSin(ms=ms_s, ns=ns_s, amps=amps) - f_sin_11 = ModesSin(ms=ms_s, ns=ns_s, amps=amps, given_in_basis="1", comp=0) - f_sin_13 = ModesSin(ms=ms_s, ns=ns_s, amps=amps, given_in_basis="1", comp=2) + f_sin = ModesSin(ms=ms_s, ns=ns_s, amps=amps) ms_c = [1] ns_c = [0] - f_cos_0 = ModesCos(ms=ms_c, ns=ns_c, amps=amps) - f_cos_11 = ModesCos(ms=ms_c, ns=ns_c, amps=amps, given_in_basis="1", comp=0) - f_cos_12 = ModesCos(ms=ms_c, ns=ns_c, amps=amps, given_in_basis="1", comp=1) - f_cos_22 = ModesCos(ms=ms_c, ns=ns_c, amps=amps, given_in_basis="2", comp=1) + f_cos = ModesCos(ms=ms_c, ns=ns_c, amps=amps) pert_params_0 = { "ModesSin": { @@ -1160,31 +1155,38 @@ def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): # Psydac discrete Derham sequence and fields derham = Derham(Nel, p, spl_kind, comm=comm) - field_0 = derham.create_spline_function("name_0", "H1", backgrounds=bckgr_0, perturbations=[f_sin_0, f_cos_0]) - field_1 = derham.create_spline_function( - "name_1", - "Hcurl", - backgrounds=bckgr_1, - perturbations=[f_sin_11, f_sin_13, f_cos_11, f_cos_12], - ) - field_2 = derham.create_spline_function("name_2", "Hdiv", backgrounds=bckgr_2, perturbations=[f_cos_22]) + field_0 = derham.create_spline_function("name_0", "H1") + field_1 = derham.create_spline_function("name_1", "Hcurl") + field_2 = derham.create_spline_function("name_2", "Hdiv") + + field_0.initialize_coeffs(bckgr_params=bckgr_params_0, pert_params=pert_params_0) + field_1.initialize_coeffs(bckgr_params=bckgr_params_1, pert_params=pert_params_1) + field_2.initialize_coeffs(bckgr_params=bckgr_params_2, pert_params=pert_params_2) # evaluation grids for comparisons - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) - meshgrids = xp.meshgrid(e1, e2, e3, indexing="ij") + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) + meshgrids = np.meshgrid(e1, e2, e3, indexing="ij") + + fun_0 = avg_0 + f_sin(*meshgrids) + f_cos(*meshgrids) - fun_0 = avg_0 + f_sin_0(*meshgrids) + f_cos_0(*meshgrids) + for i, a in enumerate(avg_1): + if a is None: + avg_1[i] = 0.0 + + for i, a in enumerate(avg_2): + if a is None: + avg_2[i] = 0.0 fun_1 = [ - avg_1[0] + f_sin_11(*meshgrids) + f_cos_11(*meshgrids), - avg_1[1] + f_cos_12(*meshgrids), - avg_1[2] + f_sin_13(*meshgrids), + avg_1[0] + f_sin(*meshgrids) + f_cos(*meshgrids), + avg_1[1] + f_cos(*meshgrids), + avg_1[2] + f_sin(*meshgrids), ] fun_2 = [ avg_2[0] + 0.0 * meshgrids[0], - avg_2[1] + f_cos_22(*meshgrids), + avg_2[1] + f_cos(*meshgrids), avg_2[2] + 0.0 * meshgrids[0], ] @@ -1192,24 +1194,24 @@ def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): f0_h = field_0(*meshgrids) f1_h = field_1(*meshgrids) f2_h = field_2(*meshgrids) - print(f"{xp.max(xp.abs(fun_0 - f0_h)) =}") - print(f"{xp.max(xp.abs(fun_1[0]
- f1_h[0])) =}") - print(f"{xp.max(xp.abs(fun_1[1] - f1_h[1])) =}") - print(f"{xp.max(xp.abs(fun_1[2] - f1_h[2])) =}") - print(f"{xp.max(xp.abs(fun_2[0] - f2_h[0])) =}") - print(f"{xp.max(xp.abs(fun_2[1] - f2_h[1])) =}") - print(f"{xp.max(xp.abs(fun_2[2] - f2_h[2])) =}") - - assert xp.max(xp.abs(fun_0 - f0_h)) < 3e-5 - assert xp.max(xp.abs(fun_1[0] - f1_h[0])) < 3e-5 - assert xp.max(xp.abs(fun_1[1] - f1_h[1])) < 3e-5 - assert xp.max(xp.abs(fun_1[2] - f1_h[2])) < 3e-5 - assert xp.max(xp.abs(fun_2[0] - f2_h[0])) < 3e-5 - assert xp.max(xp.abs(fun_2[1] - f2_h[1])) < 3e-5 - assert xp.max(xp.abs(fun_2[2] - f2_h[2])) < 3e-5 + print(f"{np.max(np.abs(fun_0 - f0_h)) = }") + print(f"{np.max(np.abs(fun_1[0] - f1_h[0])) = }") + print(f"{np.max(np.abs(fun_1[1] - f1_h[1])) = }") + print(f"{np.max(np.abs(fun_1[2] - f1_h[2])) = }") + print(f"{np.max(np.abs(fun_2[0] - f2_h[0])) = }") + print(f"{np.max(np.abs(fun_2[1] - f2_h[1])) = }") + print(f"{np.max(np.abs(fun_2[2] - f2_h[2])) = }") + + assert np.max(np.abs(fun_0 - f0_h)) < 3e-5 + assert np.max(np.abs(fun_1[0] - f1_h[0])) < 3e-5 + assert np.max(np.abs(fun_1[1] - f1_h[1])) < 3e-5 + assert np.max(np.abs(fun_1[2] - f1_h[2])) < 3e-5 + assert np.max(np.abs(fun_2[0] - f2_h[0])) < 3e-5 + assert np.max(np.abs(fun_2[1] - f2_h[1])) < 3e-5 + assert np.max(np.abs(fun_2[2] - f2_h[2])) < 3e-5 if show_plot and rank == 0: - levels = xp.linspace(xp.min(fun_0) - 1e-10, xp.max(fun_0), 40) + levels = np.linspace(np.min(fun_0) - 1e-10, np.max(fun_0), 40) plt.figure("0-form", figsize=(10, 16)) plt.subplot(2, 1, 1) @@ -1242,7 +1244,7 @@ def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): plt.figure("1-form", figsize=(30, 16)) for i, (f_h, fun) in enumerate(zip(f1_h, fun_1)): - levels = xp.linspace(xp.min(fun) - 1e-10, xp.max(fun), 40) + levels = np.linspace(np.min(fun) - 1e-10, np.max(fun), 40) plt.subplot(2, 3, 1 + i) plt.contourf( @@ -1274,7 +1276,7 @@ def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): plt.figure("2-form", figsize=(30, 16)) for i, (f_h, fun) in enumerate(zip(f2_h, fun_2)): - levels = xp.linspace(xp.min(fun) - 1e-10, xp.max(fun), 40) + levels = np.linspace(np.min(fun) - 1e-10, np.max(fun), 40) plt.subplot(2, 3, 1 + i) plt.contourf( @@ -1315,12 +1317,11 @@ def test_sincos_init_const(Nel, p, spl_kind, show_plot=False): def test_noise_init(Nel, p, spl_kind, space, direction): """Only tests 1d noise ('e1', 'e2', 'e3') !!""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.feec.utilities import compare_arrays - from struphy.initial.perturbations import Noise + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -1333,10 +1334,16 @@ def test_noise_init(Nel, p, spl_kind, space, direction): field_np = derham_np.create_spline_function("field", space) # initial conditions - pert = Noise(direction=direction, amp=0.0001, seed=1234, comp=0) - - field.initialize_coeffs(perturbations=pert) - field_np.initialize_coeffs(perturbations=pert) + pert_params = { + "noise": { + "comps": [True, False, False], + "direction": direction, + "amp": 0.0001, + "seed": 1234, + }, + } + field.initialize_coeffs(pert_params=pert_params) + field_np.initialize_coeffs(pert_params=pert_params) # print('#'*80) # print(f'npts={field.vector[0].space.npts}, npts_np={field_np.vector[0].space.npts}') @@ -1354,15 +1361,15 @@ def test_noise_init(Nel, p, spl_kind, space, direction): if __name__ == "__main__": # test_bckgr_init_const([8, 10, 12], [1, 2, 3], [False, False, 
True], [ # 'H1', 'Hcurl', 'Hdiv'], [True, True, False]) - # test_bckgr_init_mhd( - # [18, 24, 12], - # [1, 2, 1], - # [ - # False, - # True, - # True, - # ], - # show_plot=False, - # ) - test_sincos_init_const([1, 32, 32], [1, 3, 3], [True] * 3, show_plot=True) - test_noise_init([4, 8, 6], [1, 1, 1], [True, True, True], "Hcurl", "e1") + test_bckgr_init_mhd( + [18, 24, 12], + [1, 2, 1], + [ + False, + True, + True, + ], + show_plot=True, + ) + # test_sincos_init_const([1, 32, 32], [1, 3, 3], [True]*3, show_plot=True) + # test_noise_init([4, 8, 6], [1, 1, 1], [True, True, True], "Hcurl", "e1") diff --git a/src/struphy/feec/tests/test_l2_projectors.py b/src/struphy/feec/tests/test_l2_projectors.py index 2e9f611eb..999215c36 100644 --- a/src/struphy/feec/tests/test_l2_projectors.py +++ b/src/struphy/feec/tests/test_l2_projectors.py @@ -1,6 +1,5 @@ import inspect -import cunumpy as xp import matplotlib.pyplot as plt import pytest from psydac.ddm.mpi import mpi as MPI @@ -9,13 +8,14 @@ from struphy.feec.projectors import L2Projector from struphy.feec.psydac_derham import Derham from struphy.geometry import domains +from struphy.utils.arrays import xp as np @pytest.mark.parametrize("Nel", [[16, 32, 1]]) @pytest.mark.parametrize("p", [[2, 1, 1], [3, 2, 1]]) @pytest.mark.parametrize("spl_kind", [[False, True, True]]) @pytest.mark.parametrize("array_input", [False, True]) -def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_gvec=False, with_desc=False, do_plot=False): +def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_desc=False, do_plot=False): """Tests the L2-projectors for all available mappings. Both callable and array inputs to the projectors are tested. @@ -28,7 +28,7 @@ def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_gvec=False, derham = Derham(Nel, p, spl_kind, comm=comm) # constant function - f = lambda e1, e2, e3: xp.sin(xp.pi * e1) * xp.cos(2 * xp.pi * e2) + f = lambda e1, e2, e3: np.sin(np.pi * e1) * np.cos(2 * np.pi * e2) # create domain object dom_types = [] @@ -39,23 +39,19 @@ def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_gvec=False, dom_classes += [val] # evaluation points - e1 = xp.linspace(0.0, 1.0, 30) - e2 = xp.linspace(0.0, 1.0, 40) + e1 = np.linspace(0.0, 1.0, 30) + e2 = np.linspace(0.0, 1.0, 40) e3 = 0.0 - ee1, ee2, ee3 = xp.meshgrid(e1, e2, e3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(e1, e2, e3, indexing="ij") for dom_type, dom_class in zip(dom_types, dom_classes): print("#" * 80) - print(f"Testing {dom_class =}") + print(f"Testing {dom_class = }") print("#" * 80) - if "GVEC" in dom_type and not with_gvec: - print(f"Attention: {with_gvec =}, GVEC not tested here !!") - continue - - if "DESC" in dom_type and not with_desc: - print(f"Attention: {with_desc =}, DESC not tested here !!") + print(f"Attention: {with_desc = }, DESC not tested here !!") continue domain = dom_class() @@ -80,12 +76,12 @@ def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_gvec=False, if array_input: pts_q = derham.quad_grid_pts[sp_key] if sp_id in ("H1", "L2"): - ee = xp.meshgrid(*[pt.flatten() for pt in pts_q], indexing="ij") + ee = np.meshgrid(*[pt.flatten() for pt in pts_q], indexing="ij") f_array = f(*ee) else: f_array = [] for pts in pts_q: - ee = xp.meshgrid(*[pt.flatten() for pt in pts], indexing="ij") + ee = np.meshgrid(*[pt.flatten() for pt in pts], indexing="ij") f_array += [f(*ee)] f_args = f_array else: @@ -95,27 +91,27 @@ def test_l2_projectors_mappings(Nel, p, spl_kind, array_input, with_gvec=False,
veco = P_L2(f_args, out=out) assert veco is out - assert xp.all(vec.toarray() == veco.toarray()) + assert np.all(vec.toarray() == veco.toarray()) field.vector = vec field_vals = field(e1, e2, e3) if sp_id in ("H1", "L2"): - err = xp.max(xp.abs(f_analytic(ee1, ee2, ee3) - field_vals)) + err = np.max(np.abs(f_analytic(ee1, ee2, ee3) - field_vals)) f_plot = field_vals else: - err = [xp.max(xp.abs(exact(ee1, ee2, ee3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] + err = [np.max(np.abs(exact(ee1, ee2, ee3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] f_plot = field_vals[0] - print(f"{sp_id =}, {xp.max(err) =}") + print(f"{sp_id = }, {np.max(err) = }") if sp_id in ("H1", "H1vec"): - assert xp.max(err) < 0.004 + assert np.max(err) < 0.004 else: - assert xp.max(err) < 0.12 + assert np.max(err) < 0.12 if do_plot and rank == 0: plt.figure(f"{dom_type}, {sp_id}") - plt.contourf(e1, e2, xp.squeeze(f_plot[:, :, 0].T)) + plt.contourf(e1, e2, np.squeeze(f_plot[:, :, 0].T)) plt.show() @@ -138,7 +134,7 @@ def test_l2_projectors_convergence(direction, pi, spl_kindi, do_plot=False): for n, Neli in enumerate(Nels): # test function def fun(eta): - return xp.cos(4 * xp.pi * eta) + return np.cos(4 * np.pi * eta) # create derham object, test functions and evaluation points e1 = 0.0 @@ -148,7 +144,7 @@ def fun(eta): Nel = [Neli, 1, 1] p = [pi, 1, 1] spl_kind = [spl_kindi, True, True] - e1 = xp.linspace(0.0, 1.0, 100) + e1 = np.linspace(0.0, 1.0, 100) e = e1 c = 0 @@ -158,7 +154,7 @@ def f(x, y, z): Nel = [1, Neli, 1] p = [1, pi, 1] spl_kind = [True, spl_kindi, True] - e2 = xp.linspace(0.0, 1.0, 100) + e2 = np.linspace(0.0, 1.0, 100) e = e2 c = 1 @@ -168,7 +164,7 @@ def f(x, y, z): Nel = [1, 1, Neli] p = [1, 1, pi] spl_kind = [True, True, spl_kindi] - e3 = xp.linspace(0.0, 1.0, 100) + e3 = np.linspace(0.0, 1.0, 100) e = e3 c = 2 @@ -203,19 +199,19 @@ def f(x, y, z): vec = P_L2(f_analytic) veco = P_L2(f_analytic, out=out) assert veco is out - assert xp.all(vec.toarray() == veco.toarray()) + assert np.all(vec.toarray() == veco.toarray()) field.vector = vec field_vals = field(e1, e2, e3, squeeze_out=True) if sp_id in ("H1", "L2"): - err = xp.max(xp.abs(f_analytic(e1, e2, e3) - field_vals)) + err = np.max(np.abs(f_analytic(e1, e2, e3) - field_vals)) f_plot = field_vals else: - err = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] + err = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] f_plot = field_vals[0] - errors[sp_id] += [xp.max(err)] + errors[sp_id] += [np.max(err)] if do_plot: plt.figure(sp_id + ", L2-proj. 
convergence") @@ -236,8 +232,8 @@ def f(x, y, z): line_for_rate_p1 = [Ne ** (-rate_p1) * errors[sp_id][0] / Nels[0] ** (-rate_p1) for Ne in Nels] line_for_rate_p0 = [Ne ** (-rate_p0) * errors[sp_id][0] / Nels[0] ** (-rate_p0) for Ne in Nels] - m, _ = xp.polyfit(xp.log(Nels), xp.log(errors[sp_id]), deg=1) - print(f"{sp_id =}, fitted convergence rate = {-m}, degree = {pi}") + m, _ = np.polyfit(np.log(Nels), np.log(errors[sp_id]), deg=1) + print(f"{sp_id = }, fitted convergence rate = {-m}, degree = {pi}") if sp_id in ("H1", "H1vec"): assert -m > (pi + 1 - 0.05) else: @@ -251,7 +247,7 @@ def f(x, y, z): plt.loglog(Nels, line_for_rate_p0, "k--") plt.text(Nels[-2], line_for_rate_p1[-2], f"1/Nel^{rate_p1}") plt.text(Nels[-2], line_for_rate_p0[-2], f"1/Nel^{rate_p0}") - plt.title(f"{sp_id =}, degree = {pi}") + plt.title(f"{sp_id = }, degree = {pi}") plt.xlabel("Nel") if do_plot and rank == 0: @@ -264,5 +260,5 @@ def f(x, y, z): spl_kind = [False, True, True] array_input = True test_l2_projectors_mappings(Nel, p, spl_kind, array_input, do_plot=False, with_desc=False) - test_l2_projectors_convergence(0, 1, True, do_plot=False) + # test_l2_projectors_convergence(0, 1, True, do_plot=True) # test_l2_projectors_convergence(1, 1, False, do_plot=True) diff --git a/src/struphy/feec/tests/test_local_projectors.py b/src/struphy/feec/tests/test_local_projectors.py index f51177a6a..21e2392a4 100644 --- a/src/struphy/feec/tests/test_local_projectors.py +++ b/src/struphy/feec/tests/test_local_projectors.py @@ -1,7 +1,6 @@ import inspect import time -import cunumpy as xp import matplotlib.pyplot as plt import pytest from psydac.ddm.mpi import MockComm @@ -13,6 +12,54 @@ from struphy.feec.local_projectors_kernels import fill_matrix_column from struphy.feec.psydac_derham import Derham -from struphy.feec.utilities_local_projectors import get_one_spline, get_span_and_basis, get_values_and_indices_splines +from struphy.feec.utilities_local_projectors import get_one_spline, get_values_and_indices_splines +from struphy.utils.arrays import xp as np + + +def get_span_and_basis(pts, space): + """Compute the knot span index and the values of the p + 1 basis functions at each point in pts. + + Parameters + ---------- + pts : np.array + 2d array of points (ii, iq) = (interval, quadrature point). + + space : SplineSpace + Psydac object, the 1d spline space to be projected. + + Returns + ------- + span : np.array + 2d array indexed by (n, nq), where n is the interval and nq is the quadrature point in the interval. + + basis : np.array + 3d array of values of basis functions indexed by (n, nq, basis function). + """ + + import psydac.core.bsplines as bsp + + # Extract knot vectors, degree and kind of basis + T = space.knots + p = space.degree + + span = np.zeros(pts.shape, dtype=int) + basis = np.zeros((*pts.shape, p + 1), dtype=float) + + for n in range(pts.shape[0]): + for nq in range(pts.shape[1]): + # avoid 1. --> 0.
for clamped interpolation + x = pts[n, nq] % (1.0 + 1e-14) + span_tmp = bsp.find_span(T, p, x) + basis[n, nq, :] = bsp.basis_funs_all_ders( + T, + p, + x, + span_tmp, + 0, + normalization=space.basis, + ) + span[n, nq] = span_tmp # % space.nbasis + + return span, basis @pytest.mark.parametrize("Nel", [[14, 16, 18]]) @@ -32,15 +79,15 @@ def test_local_projectors_compare_global(Nel, p, spl_kind): # constant function def f(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) * xp.cos(4.0 * xp.pi * e2) * xp.sin(6.0 * xp.pi * e3) + return np.sin(2.0 * np.pi * e1) * np.cos(4.0 * np.pi * e2) * np.sin(6.0 * np.pi * e3) - # f = lambda e1, e2, e3: xp.sin(2.0*xp.pi*e1) * xp.cos(4.0*xp.pi*e2) + # f = lambda e1, e2, e3: np.sin(2.0*np.pi*e1) * np.cos(4.0*np.pi*e2) # evaluation points - e1 = xp.linspace(0.0, 1.0, 10) - e2 = xp.linspace(0.0, 1.0, 9) - e3 = xp.linspace(0.0, 1.0, 8) + e1 = np.linspace(0.0, 1.0, 10) + e2 = np.linspace(0.0, 1.0, 9) + e3 = np.linspace(0.0, 1.0, 8) - ee1, ee2, ee3 = xp.meshgrid(e1, e2, e3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(e1, e2, e3, indexing="ij") # loop over spaces for sp_id, sp_key in derham.space_to_form.items(): @@ -79,29 +126,29 @@ def f(e1, e2, e3): fieldg_vals = fieldg(e1, e2, e3) if sp_id in ("H1", "L2"): - err = xp.max(xp.abs(f_analytic(ee1, ee2, ee3) - field_vals)) + err = np.max(np.abs(f_analytic(ee1, ee2, ee3) - field_vals)) # Error comparing the global and local projectors - errg = xp.max(xp.abs(fieldg_vals - field_vals)) + errg = np.max(np.abs(fieldg_vals - field_vals)) else: - err = xp.zeros(3) - err[0] = xp.max(xp.abs(f(ee1, ee2, ee3) - field_vals[0])) - err[1] = xp.max(xp.abs(f(ee1, ee2, ee3) - field_vals[1])) - err[2] = xp.max(xp.abs(f(ee1, ee2, ee3) - field_vals[2])) + err = np.zeros(3) + err[0] = np.max(np.abs(f(ee1, ee2, ee3) - field_vals[0])) + err[1] = np.max(np.abs(f(ee1, ee2, ee3) - field_vals[1])) + err[2] = np.max(np.abs(f(ee1, ee2, ee3) - field_vals[2])) # Error comparing the global and local projectors - errg = xp.zeros(3) - errg[0] = xp.max(xp.abs(fieldg_vals[0] - field_vals[0])) - errg[1] = xp.max(xp.abs(fieldg_vals[1] - field_vals[1])) - errg[2] = xp.max(xp.abs(fieldg_vals[2] - field_vals[2])) + errg = np.zeros(3) + errg[0] = np.max(np.abs(fieldg_vals[0] - field_vals[0])) + errg[1] = np.max(np.abs(fieldg_vals[1] - field_vals[1])) + errg[2] = np.max(np.abs(fieldg_vals[2] - field_vals[2])) - print(f"{sp_id =}, {xp.max(err) =}, {xp.max(errg) =},{exectime =}") + print(f"{sp_id = }, {np.max(err) = }, {np.max(errg) = },{exectime = }") if sp_id in ("H1", "H1vec"): - assert xp.max(err) < 0.011 - assert xp.max(errg) < 0.011 + assert np.max(err) < 0.011 + assert np.max(errg) < 0.011 else: - assert xp.max(err) < 0.1 - assert xp.max(errg) < 0.1 + assert np.max(err) < 0.1 + assert np.max(errg) < 0.1 @pytest.mark.parametrize("direction", [0, 1, 2]) @@ -126,7 +173,7 @@ def test_local_projectors_convergence(direction, pi, spl_kindi, do_plot=False): for n, Neli in enumerate(Nels): # test function def fun(eta): - return xp.cos(4 * xp.pi * eta) + return np.cos(4 * np.pi * eta) # create derham object, test functions and evaluation points e1 = 0.0 @@ -136,7 +183,7 @@ def fun(eta): Nel = [Neli, 1, 1] p = [pi, 1, 1] spl_kind = [spl_kindi, True, True] - e1 = xp.linspace(0.0, 1.0, 100) + e1 = np.linspace(0.0, 1.0, 100) e = e1 c = 0 @@ -146,7 +193,7 @@ def f(x, y, z): Nel = [1, Neli, 1] p = [1, pi, 1] spl_kind = [True, spl_kindi, True] - e2 = xp.linspace(0.0, 1.0, 100) + e2 = np.linspace(0.0, 1.0, 100) e = e2 c = 1 @@ -156,7 +203,7 @@ def f(x, y, z): Nel = [1, 1, 
Neli] p = [1, 1, pi] spl_kind = [True, True, spl_kindi] - e3 = xp.linspace(0.0, 1.0, 100) + e3 = np.linspace(0.0, 1.0, 100) e = e3 c = 2 @@ -185,13 +232,13 @@ def f(x, y, z): field_vals = field(e1, e2, e3, squeeze_out=True) if sp_id in ("H1", "L2"): - err = xp.max(xp.abs(f_analytic(e1, e2, e3) - field_vals)) + err = np.max(np.abs(f_analytic(e1, e2, e3) - field_vals)) f_plot = field_vals else: - err = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] + err = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(f_analytic, field_vals)] f_plot = field_vals[0] - errors[sp_id] += [xp.max(err)] + errors[sp_id] += [np.max(err)] if do_plot: plt.figure(sp_id + ", Local-proj. convergence") @@ -210,21 +257,21 @@ def f(x, y, z): line_for_rate_p1 = [Ne ** (-rate_p1) * errors[sp_id][0] / Nels[0] ** (-rate_p1) for Ne in Nels] line_for_rate_p0 = [Ne ** (-rate_p0) * errors[sp_id][0] / Nels[0] ** (-rate_p0) for Ne in Nels] - m, _ = xp.polyfit(xp.log(Nels), xp.log(errors[sp_id]), deg=1) + m, _ = np.polyfit(np.log(Nels), np.log(errors[sp_id]), deg=1) if sp_id in ("H1", "H1vec"): # Sometimes for very large number of elements the convergance rate falls of a bit since the error is already so small floating point impressions become relevant # for those cases is better to compute the convergance rate using only the information of Nel with smaller number if -m <= (pi + 1 - 0.1): - m = -xp.log2(errors[sp_id][1] / errors[sp_id][2]) - print(f"{sp_id =}, fitted convergence rate = {-m}, degree = {pi}") + m = -np.log2(errors[sp_id][1] / errors[sp_id][2]) + print(f"{sp_id = }, fitted convergence rate = {-m}, degree = {pi}") assert -m > (pi + 1 - 0.1) else: # Sometimes for very large number of elements the convergance rate falls of a bit since the error is already so small floating point impressions become relevant # for those cases is better to compute the convergance rate using only the information of Nel with smaller number if -m <= (pi - 0.1): - m = -xp.log2(errors[sp_id][1] / errors[sp_id][2]) - print(f"{sp_id =}, fitted convergence rate = {-m}, degree = {pi}") + m = -np.log2(errors[sp_id][1] / errors[sp_id][2]) + print(f"{sp_id = }, fitted convergence rate = {-m}, degree = {pi}") assert -m > (pi - 0.1) if do_plot: @@ -235,7 +282,7 @@ def f(x, y, z): plt.loglog(Nels, line_for_rate_p0, "k--") plt.text(Nels[-2], line_for_rate_p1[-2], f"1/Nel^{rate_p1}") plt.text(Nels[-2], line_for_rate_p0[-2], f"1/Nel^{rate_p0}") - plt.title(f"{sp_id =}, degree = {pi}") + plt.title(f"{sp_id = }, degree = {pi}") plt.xlabel("Nel") if do_plot and rank == 0: @@ -268,12 +315,12 @@ def aux_test_replication_of_basis(Nel, plist, spl_kind): def make_basis_fun(i): def fun(etas, eta2, eta3): if isinstance(etas, float) or isinstance(etas, int): - etas = xp.array([etas]) - out = xp.zeros_like(etas) + etas = np.array([etas]) + out = np.zeros_like(etas) for j, eta in enumerate(etas): span = find_span(T, p, eta) - inds = xp.arange(span - p, span + 1) % N - pos = xp.argwhere(inds == i) + inds = np.arange(span - p, span + 1) % N + pos = np.argwhere(inds == i) # print(f'{pos = }') if pos.size > 0: pos = pos[0, 0] @@ -288,18 +335,18 @@ def fun(etas, eta2, eta3): fun = make_basis_fun(j) lambdas = P_Loc(fun).toarray() - etas = xp.linspace(0.0, 1.0, 100) - fun_h = xp.zeros(100) + etas = np.linspace(0.0, 1.0, 100) + fun_h = np.zeros(100) for k, eta in enumerate(etas): span = find_span(T, p, eta) - ind1 = xp.arange(span - p, span + 1) % N + ind1 = np.arange(span - p, span + 1) % N basis = 
basis_funs(T, p, eta, span, normalize=normalize) fun_h[k] = evaluation_kernel_1d(p, basis, ind1, lambdas) - if xp.max(xp.abs(fun(etas, 0.0, 0.0) - fun_h)) >= 10.0**-10: - print(xp.max(xp.abs(fun(etas, 0.0, 0.0) - fun_h))) - assert xp.max(xp.abs(fun(etas, 0.0, 0.0) - fun_h)) < 10.0**-10 - # print(f'{j = }, max error: {xp.max(xp.abs(fun(etas,0.0,0.0) - fun_h))}') + if np.max(np.abs(fun(etas, 0.0, 0.0) - fun_h)) >= 10.0**-10: + print(np.max(np.abs(fun(etas, 0.0, 0.0) - fun_h))) + assert np.max(np.abs(fun(etas, 0.0, 0.0) - fun_h)) < 10.0**-10 + # print(f'{j = }, max error: {np.max(np.abs(fun(etas,0.0,0.0) - fun_h))}') # For D-splines @@ -374,7 +421,7 @@ def test_basis_projection_operator_local(Nel, plist, spl_kind, out_sp_key, in_sp # Helper function to handle reshaping and getting spans and basis def process_eta(eta, w1d): if isinstance(eta, (float, int)): - eta = xp.array([eta]) + eta = np.array([eta]) if len(eta.shape) == 1: eta = eta.reshape((eta.shape[0], 1)) spans, values = get_span_and_basis(eta, w1d) @@ -387,7 +434,7 @@ def fun(eta1, eta2, eta3): eta = eta_map[dim_idx] w1d = W1ds[0][dim_idx] if is_B else V1ds[0][dim_idx] - out = xp.zeros_like(eta) + out = np.zeros_like(eta) for j1 in range(eta.shape[0]): for j2 in range(eta.shape[1]): for j3 in range(eta.shape[2]): @@ -430,21 +477,21 @@ def fun(eta1, eta2, eta3): if out_sp_key == "0" or out_sp_key == "3": npts_out = derham.Vh[out_sp_key].npts - starts = xp.array(out.starts, dtype=int) - ends = xp.array(out.ends, dtype=int) - pds = xp.array(out.pads, dtype=int) + starts = np.array(out.starts, dtype=int) + ends = np.array(out.ends, dtype=int) + pds = np.array(out.pads, dtype=int) VFEM1ds = [VFEM.spaces] - nbasis_out = xp.array([VFEM1ds[0][0].nbasis, VFEM1ds[0][1].nbasis, VFEM1ds[0][2].nbasis]) + nbasis_out = np.array([VFEM1ds[0][0].nbasis, VFEM1ds[0][1].nbasis, VFEM1ds[0][2].nbasis]) else: - npts_out = xp.array([sp.npts for sp in P_Loc.coeff_space.spaces]) - pds = xp.array([vi.pads for vi in P_Loc.coeff_space.spaces]) - starts = xp.array([vi.starts for vi in P_Loc.coeff_space.spaces]) - ends = xp.array([vi.ends for vi in P_Loc.coeff_space.spaces]) - starts = xp.array(starts, dtype=int) - ends = xp.array(ends, dtype=int) - pds = xp.array(pds, dtype=int) + npts_out = np.array([sp.npts for sp in P_Loc.coeff_space.spaces]) + pds = np.array([vi.pads for vi in P_Loc.coeff_space.spaces]) + starts = np.array([vi.starts for vi in P_Loc.coeff_space.spaces]) + ends = np.array([vi.ends for vi in P_Loc.coeff_space.spaces]) + starts = np.array(starts, dtype=int) + ends = np.array(ends, dtype=int) + pds = np.array(pds, dtype=int) VFEM1ds = [comp.spaces for comp in VFEM.spaces] - nbasis_out = xp.array( + nbasis_out = np.array( [ [VFEM1ds[0][0].nbasis, VFEM1ds[0][1].nbasis, VFEM1ds[0][2].nbasis], [ @@ -453,13 +500,13 @@ def fun(eta1, eta2, eta3): VFEM1ds[1][2].nbasis, ], [VFEM1ds[2][0].nbasis, VFEM1ds[2][1].nbasis, VFEM1ds[2][2].nbasis], - ], + ] ) if in_sp_key == "0" or in_sp_key == "3": npts_in = derham.Vh[in_sp_key].npts else: - npts_in = xp.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + npts_in = np.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) def define_basis(in_sp_key): def wrapper(dim, index, h=None): @@ -509,13 +556,13 @@ def basis3(i3, h=None): input[random_i0, random_i1, random_i2] = 1.0 input.update_ghost_regions() else: - npts_in = xp.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + npts_in = np.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) 
random_h = random.randrange(0, 3) random_i0 = random.randrange(0, npts_in[random_h][0]) random_i1 = random.randrange(0, npts_in[random_h][1]) random_i2 = random.randrange(0, npts_in[random_h][2]) - starts_in = xp.array([sp.starts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) - ends_in = xp.array([sp.ends for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + starts_in = np.array([sp.starts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + ends_in = np.array([sp.ends for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) if starts_in[random_h][0] <= random_i0 and random_i0 <= ends_in[random_h][0]: input[random_h][random_i0, random_i1, random_i2] = 1.0 input.update_ghost_regions() @@ -523,84 +570,84 @@ def basis3(i3, h=None): # We define the matrix if out_sp_key == "0" or out_sp_key == "3": if in_sp_key == "0" or in_sp_key == "3": - matrix = xp.zeros((npts_out[0] * npts_out[1] * npts_out[2], npts_in[0] * npts_in[1] * npts_in[2])) + matrix = np.zeros((npts_out[0] * npts_out[1] * npts_out[2], npts_in[0] * npts_in[1] * npts_in[2])) else: - matrix = xp.zeros( + matrix = np.zeros( ( npts_out[0] * npts_out[1] * npts_out[2], npts_in[0][0] * npts_in[0][1] * npts_in[0][2] + npts_in[1][0] * npts_in[1][1] * npts_in[1][2] + npts_in[2][0] * npts_in[2][1] * npts_in[2][2], - ), + ) ) else: if in_sp_key == "0" or in_sp_key == "3": - matrix0 = xp.zeros((npts_out[0][0] * npts_out[0][1] * npts_out[0][2], npts_in[0] * npts_in[1] * npts_in[2])) - matrix1 = xp.zeros((npts_out[1][0] * npts_out[1][1] * npts_out[1][2], npts_in[0] * npts_in[1] * npts_in[2])) - matrix2 = xp.zeros((npts_out[2][0] * npts_out[2][1] * npts_out[2][2], npts_in[0] * npts_in[1] * npts_in[2])) + matrix0 = np.zeros((npts_out[0][0] * npts_out[0][1] * npts_out[0][2], npts_in[0] * npts_in[1] * npts_in[2])) + matrix1 = np.zeros((npts_out[1][0] * npts_out[1][1] * npts_out[1][2], npts_in[0] * npts_in[1] * npts_in[2])) + matrix2 = np.zeros((npts_out[2][0] * npts_out[2][1] * npts_out[2][2], npts_in[0] * npts_in[1] * npts_in[2])) else: - matrix00 = xp.zeros( + matrix00 = np.zeros( ( npts_out[0][0] * npts_out[0][1] * npts_out[0][2], npts_in[0][0] * npts_in[0][1] * npts_in[0][2], - ), + ) ) - matrix10 = xp.zeros( + matrix10 = np.zeros( ( npts_out[1][0] * npts_out[1][1] * npts_out[1][2], npts_in[0][0] * npts_in[0][1] * npts_in[0][2], - ), + ) ) - matrix20 = xp.zeros( + matrix20 = np.zeros( ( npts_out[2][0] * npts_out[2][1] * npts_out[2][2], npts_in[0][0] * npts_in[0][1] * npts_in[0][2], - ), + ) ) - matrix01 = xp.zeros( + matrix01 = np.zeros( ( npts_out[0][0] * npts_out[0][1] * npts_out[0][2], npts_in[1][0] * npts_in[1][1] * npts_in[1][2], - ), + ) ) - matrix11 = xp.zeros( + matrix11 = np.zeros( ( npts_out[1][0] * npts_out[1][1] * npts_out[1][2], npts_in[1][0] * npts_in[1][1] * npts_in[1][2], - ), + ) ) - matrix21 = xp.zeros( + matrix21 = np.zeros( ( npts_out[2][0] * npts_out[2][1] * npts_out[2][2], npts_in[1][0] * npts_in[1][1] * npts_in[1][2], - ), + ) ) - matrix02 = xp.zeros( + matrix02 = np.zeros( ( npts_out[0][0] * npts_out[0][1] * npts_out[0][2], npts_in[2][0] * npts_in[2][1] * npts_in[2][2], - ), + ) ) - matrix12 = xp.zeros( + matrix12 = np.zeros( ( npts_out[1][0] * npts_out[1][1] * npts_out[1][2], npts_in[2][0] * npts_in[2][1] * npts_in[2][2], - ), + ) ) - matrix22 = xp.zeros( + matrix22 = np.zeros( ( npts_out[2][0] * npts_out[2][1] * npts_out[2][2], npts_in[2][0] * npts_in[2][1] * npts_in[2][2], - ), + ) ) # We build the BasisProjectionOperator by hand if out_sp_key == "0" or out_sp_key == "3": if in_sp_key == "0" or 
in_sp_key == "3": - # def f_analytic(e1,e2,e3): return (xp.sin(2.0*xp.pi*e1)+xp.cos(4.0*xp.pi*e2))*basis1(random_i0)(e1,e2,e3)*basis2(random_i1)(e1,e2,e3)*basis3(random_i2)(e1,e2,e3) + # def f_analytic(e1,e2,e3): return (np.sin(2.0*np.pi*e1)+np.cos(4.0*np.pi*e2))*basis1(random_i0)(e1,e2,e3)*basis2(random_i1)(e1,e2,e3)*basis3(random_i2)(e1,e2,e3) # out = P_Loc(f_analytic) counter = 0 @@ -610,7 +657,7 @@ def basis3(i3, h=None): def f_analytic(e1, e2, e3): return ( - (xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2)) + (np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2)) * basis1(col0)(e1, e2, e3) * basis2(col1)(e1, e2, e3) * basis3(col2)(e1, e2, e3) @@ -630,7 +677,7 @@ def f_analytic(e1, e2, e3): def f_analytic(e1, e2, e3): return ( - (xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2)) + (np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -649,7 +696,7 @@ def f_analytic(e1, e2, e3): def f_analytic1(e1, e2, e3): return ( - (xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2)) + (np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2)) * basis1(col0)(e1, e2, e3) * basis2(col1)(e1, e2, e3) * basis3(col2)(e1, e2, e3) @@ -657,7 +704,7 @@ def f_analytic1(e1, e2, e3): def f_analytic2(e1, e2, e3): return ( - (xp.cos(2.0 * xp.pi * e2) + xp.cos(6.0 * xp.pi * e3)) + (np.cos(2.0 * np.pi * e2) + np.cos(6.0 * np.pi * e3)) * basis1(col0)(e1, e2, e3) * basis2(col1)(e1, e2, e3) * basis3(col2)(e1, e2, e3) @@ -665,7 +712,7 @@ def f_analytic2(e1, e2, e3): def f_analytic3(e1, e2, e3): return ( - (xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e3)) + (np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e3)) * basis1(col0)(e1, e2, e3) * basis2(col1)(e1, e2, e3) * basis3(col2)(e1, e2, e3) @@ -677,7 +724,7 @@ def f_analytic3(e1, e2, e3): fill_matrix_column(starts[2], ends[2], pds[2], counter, nbasis_out[2], matrix2, out[2]._data) counter += 1 - matrix = xp.vstack((matrix0, matrix1, matrix2)) + matrix = np.vstack((matrix0, matrix1, matrix2)) else: for h in range(3): @@ -689,7 +736,7 @@ def f_analytic3(e1, e2, e3): def f_analytic0(e1, e2, e3): return ( - (xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2)) + (np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -697,7 +744,7 @@ def f_analytic0(e1, e2, e3): def f_analytic1(e1, e2, e3): return ( - (xp.sin(10.0 * xp.pi * e1) + xp.cos(41.0 * xp.pi * e2)) + (np.sin(10.0 * np.pi * e1) + np.cos(41.0 * np.pi * e2)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -705,7 +752,7 @@ def f_analytic1(e1, e2, e3): def f_analytic2(e1, e2, e3): return ( - (xp.sin(25.0 * xp.pi * e1) + xp.cos(49.0 * xp.pi * e2)) + (np.sin(25.0 * np.pi * e1) + np.cos(49.0 * np.pi * e2)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -715,7 +762,7 @@ def f_analytic2(e1, e2, e3): def f_analytic0(e1, e2, e3): return ( - (xp.cos(2.0 * xp.pi * e2) + xp.cos(6.0 * xp.pi * e3)) + (np.cos(2.0 * np.pi * e2) + np.cos(6.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -723,7 +770,7 @@ def f_analytic0(e1, e2, e3): def f_analytic1(e1, e2, e3): return ( - (xp.cos(12.0 * xp.pi * e2) + xp.cos(62.0 * xp.pi * e3)) + (np.cos(12.0 * np.pi * e2) + np.cos(62.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -731,7 +778,7 @@ def 
f_analytic1(e1, e2, e3): def f_analytic2(e1, e2, e3): return ( - (xp.cos(25.0 * xp.pi * e2) + xp.cos(68.0 * xp.pi * e3)) + (np.cos(25.0 * np.pi * e2) + np.cos(68.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -740,7 +787,7 @@ def f_analytic2(e1, e2, e3): def f_analytic0(e1, e2, e3): return ( - (xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e3)) + (np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -748,7 +795,7 @@ def f_analytic0(e1, e2, e3): def f_analytic1(e1, e2, e3): return ( - (xp.sin(16.0 * xp.pi * e1) + xp.sin(43.0 * xp.pi * e3)) + (np.sin(16.0 * np.pi * e1) + np.sin(43.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -756,7 +803,7 @@ def f_analytic1(e1, e2, e3): def f_analytic2(e1, e2, e3): return ( - (xp.sin(65.0 * xp.pi * e1) + xp.sin(47.0 * xp.pi * e3)) + (np.sin(65.0 * np.pi * e1) + np.sin(47.0 * np.pi * e3)) * basis1(col0, h)(e1, e2, e3) * basis2(col1, h)(e1, e2, e3) * basis3(col2, h)(e1, e2, e3) @@ -851,23 +898,23 @@ def f_analytic2(e1, e2, e3): ) counter += 1 - matrix0 = xp.hstack((matrix00, matrix01, matrix02)) - matrix1 = xp.hstack((matrix10, matrix11, matrix12)) - matrix2 = xp.hstack((matrix20, matrix21, matrix22)) - matrix = xp.vstack((matrix0, matrix1, matrix2)) + matrix0 = np.hstack((matrix00, matrix01, matrix02)) + matrix1 = np.hstack((matrix10, matrix11, matrix12)) + matrix2 = np.hstack((matrix20, matrix21, matrix22)) + matrix = np.vstack((matrix0, matrix1, matrix2)) # Now we build the same matrix using the BasisProjectionOperatorLocal if out_sp_key == "0" or out_sp_key == "3": if in_sp_key == "0" or in_sp_key == "3": def f_analytic(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2) matrix_new = BasisProjectionOperatorLocal(P_Loc, derham.Vh_fem[in_sp_key], [[f_analytic]], transposed=False) else: def f_analytic(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -882,13 +929,13 @@ def f_analytic(e1, e2, e3): if in_sp_key == "0" or in_sp_key == "3": def f_analytic1(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2) def f_analytic2(e1, e2, e3): - return xp.cos(2.0 * xp.pi * e2) + xp.cos(6.0 * xp.pi * e3) + return np.cos(2.0 * np.pi * e2) + np.cos(6.0 * np.pi * e3) def f_analytic3(e1, e2, e3): - return xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e3) + return np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e3) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -905,31 +952,31 @@ def f_analytic3(e1, e2, e3): else: def f_analytic00(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e2) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e2) def f_analytic01(e1, e2, e3): - return xp.cos(2.0 * xp.pi * e2) + xp.cos(6.0 * xp.pi * e3) + return np.cos(2.0 * np.pi * e2) + np.cos(6.0 * np.pi * e3) def f_analytic02(e1, e2, e3): - return xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e3) + return np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e3) def f_analytic10(e1, e2, e3): - return xp.sin(10.0 * xp.pi * e1) + xp.cos(41.0 * xp.pi * e2) + return np.sin(10.0 * np.pi * e1) + np.cos(41.0 * np.pi * e2) def f_analytic11(e1, e2, e3): - return 
xp.cos(12.0 * xp.pi * e2) + xp.cos(62.0 * xp.pi * e3) + return np.cos(12.0 * np.pi * e2) + np.cos(62.0 * np.pi * e3) def f_analytic12(e1, e2, e3): - return xp.sin(16.0 * xp.pi * e1) + xp.sin(43.0 * xp.pi * e3) + return np.sin(16.0 * np.pi * e1) + np.sin(43.0 * np.pi * e3) def f_analytic20(e1, e2, e3): - return xp.sin(25.0 * xp.pi * e1) + xp.cos(49.0 * xp.pi * e2) + return np.sin(25.0 * np.pi * e1) + np.cos(49.0 * np.pi * e2) def f_analytic21(e1, e2, e3): - return xp.cos(25.0 * xp.pi * e2) + xp.cos(68.0 * xp.pi * e3) + return np.cos(25.0 * np.pi * e2) + np.cos(68.0 * np.pi * e3) def f_analytic22(e1, e2, e3): - return xp.sin(65.0 * xp.pi * e1) + xp.sin(47.0 * xp.pi * e3) + return np.sin(65.0 * np.pi * e1) + np.sin(47.0 * np.pi * e3) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -946,7 +993,7 @@ def f_analytic22(e1, e2, e3): transposed=False, ) - compare_arrays(matrix_new.dot(v), xp.matmul(matrix, varr), rank) + compare_arrays(matrix_new.dot(v), np.matmul(matrix, varr), rank) print("BasisProjectionOperatorLocal test passed.") @@ -982,7 +1029,7 @@ def test_basis_projection_operator_local_new(Nel, plist, spl_kind, out_sp_key, i # Helper function to handle reshaping and getting spans and basis def process_eta(eta, w1d): if isinstance(eta, (float, int)): - eta = xp.array([eta]) + eta = np.array([eta]) if len(eta.shape) == 1: eta = eta.reshape((eta.shape[0], 1)) spans, values = get_span_and_basis(eta, w1d) @@ -995,7 +1042,7 @@ def fun(eta1, eta2, eta3): eta = eta_map[dim_idx] w1d = W1ds[0][dim_idx] if is_B else V1ds[0][dim_idx] - out = xp.zeros_like(eta) + out = np.zeros_like(eta) for j1 in range(eta.shape[0]): for j2 in range(eta.shape[1]): for j3 in range(eta.shape[2]): @@ -1072,22 +1119,22 @@ def basis3(i3, h=None): input[random_i0, random_i1, random_i2] = 1.0 input.update_ghost_regions() else: - npts_in = xp.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + npts_in = np.array([sp.npts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) random_h = random.randrange(0, 3) random_i0 = random.randrange(0, npts_in[random_h][0]) random_i1 = random.randrange(0, npts_in[random_h][1]) random_i2 = random.randrange(0, npts_in[random_h][2]) - starts = xp.array([sp.starts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) - ends = xp.array([sp.ends for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + starts = np.array([sp.starts for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) + ends = np.array([sp.ends for sp in derham.Vh_fem[in_sp_key].coeff_space.spaces]) if starts[random_h][0] <= random_i0 and random_i0 <= ends[random_h][0]: input[random_h][random_i0, random_i1, random_i2] = 1.0 input.update_ghost_regions() - etas1 = xp.linspace(0.0, 1.0, 1000) - etas2 = xp.array([0.5]) + etas1 = np.linspace(0.0, 1.0, 1000) + etas2 = np.array([0.5]) - etas3 = xp.array([0.5]) - meshgrid = xp.meshgrid(*[etas1, etas2, etas3], indexing="ij") + etas3 = np.array([0.5]) + meshgrid = np.meshgrid(*[etas1, etas2, etas3], indexing="ij") # Now we build the same matrix using the BasisProjectionOperatorLocal and BasisProjectionOperator @@ -1095,7 +1142,7 @@ def basis3(i3, h=None): if in_sp_key == "0" or in_sp_key == "3": def f_analytic(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e1) + return np.sin(2.0 * np.pi * e1) + np.sin(4.0 * np.pi * e1) matrix_new = BasisProjectionOperatorLocal(P_Loc, derham.Vh_fem[in_sp_key], [[f_analytic]], transposed=False) matrix_global = BasisProjectionOperator(P, derham.Vh_fem[in_sp_key], [[f_analytic]], transposed=False) @@ 
-1109,7 +1156,7 @@ def f_analytic(e1, e2, e3): else: def f_analytic(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e1) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e1) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -1139,13 +1186,13 @@ def f_analytic(e1, e2, e3): if in_sp_key == "0" or in_sp_key == "3": def f_analytic1(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e1) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e1) def f_analytic2(e1, e2, e3): - return xp.cos(2.0 * xp.pi * e1) + xp.cos(6.0 * xp.pi * e1) + return np.cos(2.0 * np.pi * e1) + np.cos(6.0 * np.pi * e1) def f_analytic3(e1, e2, e3): - return xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e1) + return np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e1) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -1172,7 +1219,7 @@ def f_analytic3(e1, e2, e3): transposed=False, ) - analytic_vals = xp.array( + analytic_vals = np.array( [ f_analytic1(*meshgrid) * basis1(random_i0)(*meshgrid) @@ -1186,36 +1233,36 @@ def f_analytic3(e1, e2, e3): * basis1(random_i0)(*meshgrid) * basis2(random_i1)(*meshgrid) * basis3(random_i2)(*meshgrid), - ], + ] ) else: def f_analytic00(e1, e2, e3): - return xp.sin(2.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e1) + return np.sin(2.0 * np.pi * e1) + np.cos(4.0 * np.pi * e1) def f_analytic01(e1, e2, e3): - return xp.cos(2.0 * xp.pi * e1) + xp.cos(6.0 * xp.pi * e1) + return np.cos(2.0 * np.pi * e1) + np.cos(6.0 * np.pi * e1) def f_analytic02(e1, e2, e3): - return xp.sin(6.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e1) + return np.sin(6.0 * np.pi * e1) + np.sin(4.0 * np.pi * e1) def f_analytic10(e1, e2, e3): - return xp.sin(3.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e1) + return np.sin(3.0 * np.pi * e1) + np.cos(4.0 * np.pi * e1) def f_analytic11(e1, e2, e3): - return xp.cos(2.0 * xp.pi * e1) + xp.cos(3.0 * xp.pi * e1) + return np.cos(2.0 * np.pi * e1) + np.cos(3.0 * np.pi * e1) def f_analytic12(e1, e2, e3): - return xp.sin(5.0 * xp.pi * e1) + xp.sin(3.0 * xp.pi * e1) + return np.sin(5.0 * np.pi * e1) + np.sin(3.0 * np.pi * e1) def f_analytic20(e1, e2, e3): - return xp.sin(5.0 * xp.pi * e1) + xp.cos(4.0 * xp.pi * e1) + return np.sin(5.0 * np.pi * e1) + np.cos(4.0 * np.pi * e1) def f_analytic21(e1, e2, e3): - return xp.cos(5.0 * xp.pi * e1) + xp.cos(6.0 * xp.pi * e1) + return np.cos(5.0 * np.pi * e1) + np.cos(6.0 * np.pi * e1) def f_analytic22(e1, e2, e3): - return xp.sin(5.0 * xp.pi * e1) + xp.sin(4.0 * xp.pi * e1) + return np.sin(5.0 * np.pi * e1) + np.sin(4.0 * np.pi * e1) matrix_new = BasisProjectionOperatorLocal( P_Loc, @@ -1253,14 +1300,14 @@ def f_analytic22(e1, e2, e3): } # Use the map to get analytic values - analytic_vals = xp.array( + analytic_vals = np.array( [ f_analytic_map[dim][random_h](*meshgrid) * basis1(random_i0, random_h)(*meshgrid) * basis2(random_i1, random_h)(*meshgrid) * basis3(random_i2, random_h)(*meshgrid) for dim in range(3) - ], + ] ) FE_loc = matrix_new.dot(input) @@ -1283,14 +1330,14 @@ def f_analytic22(e1, e2, e3): fieldglo = derham.create_spline_function("fh", out_sp_id) fieldglo.vector = FE_glo - errorloc = xp.abs(fieldloc(*meshgrid) - analytic_vals) - errorglo = xp.abs(fieldglo(*meshgrid) - analytic_vals) + errorloc = np.abs(fieldloc(*meshgrid) - analytic_vals) + errorglo = np.abs(fieldglo(*meshgrid) - analytic_vals) - meanlocal = xp.mean(errorloc) - maxlocal = xp.max(errorloc) + meanlocal = np.mean(errorloc) + maxlocal = np.max(errorloc) - meanglobal = xp.mean(errorglo) - maxglobal = xp.max(errorglo) + 
meanglobal = np.mean(errorglo) + maxglobal = np.max(errorglo) if isinstance(comm, MockComm): reducemeanlocal = meanlocal @@ -1320,10 +1367,10 @@ def f_analytic22(e1, e2, e3): if rank == 0: assert reducemeanlocal < 10.0 * reducemeanglobal or reducemeanlocal < 10.0**-5 - print(f"{reducemeanlocal =}") - print(f"{reducemaxlocal =}") - print(f"{reducemeanglobal =}") - print(f"{reducemaxglobal =}") + print(f"{reducemeanlocal = }") + print(f"{reducemaxlocal = }") + print(f"{reducemeanglobal = }") + print(f"{reducemaxglobal = }") if do_plot: if out_sp_key == "0" or out_sp_key == "3": @@ -1377,7 +1424,7 @@ def aux_test_spline_evaluation(Nel, plist, spl_kind): # Helper function to handle reshaping and getting spans and basis def process_eta(eta, w1d): if isinstance(eta, (float, int)): - eta = xp.array([eta]) + eta = np.array([eta]) if len(eta.shape) == 1: eta = eta.reshape((eta.shape[0], 1)) spans, values = get_span_and_basis(eta, w1d) @@ -1390,7 +1437,7 @@ def fun(eta1, eta2, eta3): eta = eta_map[dim_idx] w1d = W1ds[0][dim_idx] if is_B else V1ds[0][dim_idx] - out = xp.zeros_like(eta) + out = np.zeros_like(eta) for j1 in range(eta.shape[0]): for j2 in range(eta.shape[1]): for j3 in range(eta.shape[2]): @@ -1424,10 +1471,10 @@ def fun(eta1, eta2, eta3): fieldD = derham.create_spline_function("fh", "L2") npts_in_D = derham.Vh["3"].npts - etas1 = xp.linspace(0.0, 1.0, 20) - etas2 = xp.linspace(0.0, 1.0, 20) - etas3 = xp.linspace(0.0, 1.0, 20) - meshgrid = xp.meshgrid(*[etas1, etas2, etas3], indexing="ij") + etas1 = np.linspace(0.0, 1.0, 20) + etas2 = np.linspace(0.0, 1.0, 20) + etas3 = np.linspace(0.0, 1.0, 20) + meshgrid = np.meshgrid(*[etas1, etas2, etas3], indexing="ij") maxerrorB = 0.0 @@ -1440,7 +1487,7 @@ def fun(eta1, eta2, eta3): fieldB.vector = inputB def error(e1, e2, e3): - return xp.abs( + return np.abs( fieldB(e1, e2, e3) - ( make_basis_fun(True, 0, col0)(e1, e2, e3) @@ -1449,13 +1496,13 @@ def error(e1, e2, e3): ), ) - auxerror = xp.max(error(*meshgrid)) + auxerror = np.max(error(*meshgrid)) if auxerror > maxerrorB: maxerrorB = auxerror inputB[col0, col1, col2] = 0.0 - print(f"{maxerrorB =}") + print(f"{maxerrorB = }") assert maxerrorB < 10.0**-13 maxerrorD = 0.0 @@ -1468,7 +1515,7 @@ def error(e1, e2, e3): fieldD.vector = inputD def error(e1, e2, e3): - return xp.abs( + return np.abs( fieldD(e1, e2, e3) - ( make_basis_fun(False, 0, col0)(e1, e2, e3) @@ -1477,13 +1524,13 @@ def error(e1, e2, e3): ), ) - auxerror = xp.max(error(*meshgrid)) + auxerror = np.max(error(*meshgrid)) if auxerror > maxerrorD: maxerrorD = auxerror inputD[col0, col1, col2] = 0.0 - print(f"{maxerrorD =}") + print(f"{maxerrorD = }") assert maxerrorD < 10.0**-13 print("Test spline evaluation passed.") diff --git a/src/struphy/feec/tests/test_lowdim_nel_is_1.py b/src/struphy/feec/tests/test_lowdim_nel_is_1.py index 325da31ea..cdc7e0705 100644 --- a/src/struphy/feec/tests/test_lowdim_nel_is_1.py +++ b/src/struphy/feec/tests/test_lowdim_nel_is_1.py @@ -7,13 +7,13 @@ def test_lowdim_derham(Nel, p, spl_kind, do_plot=False): """Test Nel=1 in various directions.""" - import cunumpy as xp from matplotlib import pyplot as plt from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilVector from struphy.feec.psydac_derham import Derham + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -74,17 +74,17 @@ def test_lowdim_derham(Nel, p, spl_kind, do_plot=False): ### TEST COMMUTING PROJECTORS ### ################################# 
def fun(eta): - return xp.cos(2 * xp.pi * eta) + return np.cos(2 * np.pi * eta) def dfun(eta): - return -2 * xp.pi * xp.sin(2 * xp.pi * eta) + return -2 * np.pi * np.sin(2 * np.pi * eta) # evaluation points and gradient e1 = 0.0 e2 = 0.0 e3 = 0.0 if Nel[0] > 1: - e1 = xp.linspace(0.0, 1.0, 100) + e1 = np.linspace(0.0, 1.0, 100) e = e1 c = 0 @@ -95,12 +95,12 @@ def dfx(x, y, z): return dfun(x) def dfy(x, y, z): - return xp.zeros_like(x) + return np.zeros_like(x) def dfz(x, y, z): - return xp.zeros_like(x) + return np.zeros_like(x) elif Nel[1] > 1: - e2 = xp.linspace(0.0, 1.0, 100) + e2 = np.linspace(0.0, 1.0, 100) e = e2 c = 1 @@ -108,15 +108,15 @@ def f(x, y, z): return fun(y) def dfx(x, y, z): - return xp.zeros_like(y) + return np.zeros_like(y) def dfy(x, y, z): return dfun(y) def dfz(x, y, z): - return xp.zeros_like(y) + return np.zeros_like(y) elif Nel[2] > 1: - e3 = xp.linspace(0.0, 1.0, 100) + e3 = np.linspace(0.0, 1.0, 100) e = e3 c = 2 @@ -124,10 +124,10 @@ def f(x, y, z): return fun(z) def dfx(x, y, z): - return xp.zeros_like(z) + return np.zeros_like(z) def dfy(x, y, z): - return xp.zeros_like(z) + return np.zeros_like(z) def dfz(x, y, z): return dfun(z) @@ -160,22 +160,22 @@ def div_f(x, y, z): field_f0_vals = field_f0(e1, e2, e3, squeeze_out=True) # a) projection error - err_f0 = xp.max(xp.abs(f(e1, e2, e3) - field_f0_vals)) - print(f"\n{err_f0 =}") + err_f0 = np.max(np.abs(f(e1, e2, e3) - field_f0_vals)) + print(f"\n{err_f0 = }") assert err_f0 < 1e-2 # b) commuting property df0_h = derham.grad.dot(f0_h) - assert xp.allclose(df0_h.toarray(), proj_of_grad_f.toarray()) + assert np.allclose(df0_h.toarray(), proj_of_grad_f.toarray()) # c) derivative error field_df0 = derham.create_spline_function("df0", "Hcurl") field_df0.vector = df0_h field_df0_vals = field_df0(e1, e2, e3, squeeze_out=True) - err_df0 = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(grad_f, field_df0_vals)] - print(f"{err_df0 =}") - assert xp.max(err_df0) < 0.64 + err_df0 = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(grad_f, field_df0_vals)] + print(f"{err_df0 = }") + assert np.max(err_df0) < 0.64 # d) plotting plt.figure(figsize=(8, 12)) @@ -202,22 +202,22 @@ def div_f(x, y, z): field_f1_vals = field_f1(e1, e2, e3, squeeze_out=True) # a) projection error - err_f1 = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip([f, f, f], field_f1_vals)] - print(f"{err_f1 =}") - assert xp.max(err_f1) < 0.09 + err_f1 = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip([f, f, f], field_f1_vals)] + print(f"{err_f1 = }") + assert np.max(err_f1) < 0.09 # b) commuting property df1_h = derham.curl.dot(f1_h) - assert xp.allclose(df1_h.toarray(), proj_of_curl_fff.toarray()) + assert np.allclose(df1_h.toarray(), proj_of_curl_fff.toarray()) # c) derivative error field_df1 = derham.create_spline_function("df1", "Hdiv") field_df1.vector = df1_h field_df1_vals = field_df1(e1, e2, e3, squeeze_out=True) - err_df1 = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(curl_f, field_df1_vals)] - print(f"{err_df1 =}") - assert xp.max(err_df1) < 0.64 + err_df1 = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip(curl_f, field_df1_vals)] + print(f"{err_df1 = }") + assert np.max(err_df1) < 0.64 # d) plotting plt.figure(figsize=(8, 12)) @@ -249,22 +249,22 @@ def div_f(x, y, z): field_f2_vals = field_f2(e1, e2, e3, squeeze_out=True) # a) projection error - err_f2 = [xp.max(xp.abs(exact(e1, e2, e3) - field_v)) for exact, 
field_v in zip([f, f, f], field_f2_vals)] - print(f"{err_f2 =}") - assert xp.max(err_f2) < 0.09 + err_f2 = [np.max(np.abs(exact(e1, e2, e3) - field_v)) for exact, field_v in zip([f, f, f], field_f2_vals)] + print(f"{err_f2 = }") + assert np.max(err_f2) < 0.09 # b) commuting property df2_h = derham.div.dot(f2_h) - assert xp.allclose(df2_h.toarray(), proj_of_div_fff.toarray()) + assert np.allclose(df2_h.toarray(), proj_of_div_fff.toarray()) # c) derivative error field_df2 = derham.create_spline_function("df2", "L2") field_df2.vector = df2_h field_df2_vals = field_df2(e1, e2, e3, squeeze_out=True) - err_df2 = xp.max(xp.abs(div_f(e1, e2, e3) - field_df2_vals)) - print(f"{err_df2 =}") - assert xp.max(err_df2) < 0.64 + err_df2 = np.max(np.abs(div_f(e1, e2, e3) - field_df2_vals)) + print(f"{err_df2 = }") + assert np.max(err_df2) < 0.64 # d) plotting plt.figure(figsize=(8, 12)) @@ -277,7 +277,7 @@ def div_f(x, y, z): plt.subplot(2, 1, 2) plt.plot(e, div_f(e1, e2, e3), "o") plt.plot(e, field_df2_vals) plt.title("div") plt.subplots_adjust(wspace=1.0, hspace=0.4) @@ -291,8 +291,8 @@ def div_f(x, y, z): field_f3_vals = field_f3(e1, e2, e3, squeeze_out=True) # a) projection error - err_f3 = xp.max(xp.abs(f(e1, e2, e3) - field_f3_vals)) - print(f"{err_f3 =}") + err_f3 = np.max(np.abs(f(e1, e2, e3) - field_f3_vals)) + print(f"{err_f3 = }") assert err_f3 < 0.09 # d) plotting diff --git a/src/struphy/feec/tests/test_mass_matrices.py b/src/struphy/feec/tests/test_mass_matrices.py index e1d629c2e..272a5280b 100644 --- a/src/struphy/feec/tests/test_mass_matrices.py +++ b/src/struphy/feec/tests/test_mass_matrices.py @@ -6,13 +6,12 @@ @pytest.mark.parametrize("spl_kind", [[False, True, True], [True, False, True]]) @pytest.mark.parametrize( "dirichlet_bc", - [None, [(False, True), (True, False), (False, False)], [(True, False), (False, True), (False, False)]], + [None, [[False, True], [True, False], [False, False]], [[True, False], [False, True], [False, False]]], ) @pytest.mark.parametrize("mapping", [["Colella", {"Lx": 1.0, "Ly": 6.0, "alpha": 0.1, "Lz": 10.0}]]) def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): """Compare Struphy mass matrices to Struphy-legacy mass matrices.""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.mhd_operators import MHDOperators @@ -22,6 +21,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): from struphy.feec.utilities import RotationMatrix, compare_arrays, create_equal_random_arrays from struphy.fields_background.equils import ScrewPinch, ShearedSlab from struphy.geometry import domains + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -48,7 +48,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): eq_mhd = ShearedSlab( **{ "a": (mapping[1]["r1"] - mapping[1]["l1"]), - "R0": (mapping[1]["r3"] - mapping[1]["l3"]) / (2 * xp.pi), + "R0": (mapping[1]["r3"] - mapping[1]["l3"]) / (2 * np.pi), "B0": 1.0, "q0": 1.05, "q1": 1.8, @@ -56,14 +56,14 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) elif mapping[0] == "Colella": eq_mhd = ShearedSlab( **{ "a": mapping[1]["Lx"], - "R0": mapping[1]["Lz"] / (2 * xp.pi), + "R0": mapping[1]["Lz"] / (2 * np.pi), "B0": 1.0, "q0": 1.05, "q1": 1.8, @@ -71,7 +71,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } )
if show_plots: @@ -89,7 +89,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -101,12 +101,11 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): if dirichlet_bc is not None: for i, knd in enumerate(spl_kind): if knd: - dirichlet_bc[i] = (False, False) + dirichlet_bc[i] = [False, False] else: - dirichlet_bc = [(False, False)] * 3 + dirichlet_bc = [[False, False]] * 3 - dirichlet_bc = tuple(dirichlet_bc) - print(f"{dirichlet_bc =}") + print(f"{dirichlet_bc = }") # derham object derham = Derham(Nel, p, spl_kind, comm=mpi_comm, dirichlet_bc=dirichlet_bc) @@ -124,7 +123,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): # test calling the diagonal method aaa = mass_mats.M0.matrix.diagonal() bbb = mass_mats.M1.matrix.diagonal() - print(f"{aaa =}, {bbb[0, 0] =}, {bbb[0, 1] =}") + print(f"{aaa = }, {bbb[0, 0] = }, {bbb[0, 1] = }") # compare to old STRUPHY bc_old = [[None, None], [None, None], [None, None]] @@ -220,11 +219,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): # Change order of input in callable rM1ninvswitch_psy = mass_mats.create_weighted_mass( - "Hcurl", - "Hcurl", - weights=["sqrt_g", "1/eq_n0", "Ginv"], - name="M1ninv", - assemble=True, + "Hcurl", "Hcurl", weights=["sqrt_g", "1/eq_n0", "Ginv"], name="M1ninv", assemble=True ).dot(x1_psy, apply_bc=True) rot_B = RotationMatrix( @@ -233,11 +228,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): mass_mats.weights[mass_mats.selected_weight].b2_3, ) rM1Bninvswitch_psy = mass_mats.create_weighted_mass( - "Hcurl", - "Hcurl", - weights=["1/eq_n0", "sqrt_g", "Ginv", rot_B, "Ginv"], - name="M1Bninv", - assemble=True, + "Hcurl", "Hcurl", weights=["1/eq_n0", "sqrt_g", "Ginv", rot_B, "Ginv"], name="M1Bninv", assemble=True ).dot(x1_psy, apply_bc=True) # Test matrix free operators @@ -263,11 +254,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): # Change order of input in callable rM1ninvswitch_fre = mass_mats_free.create_weighted_mass( - "Hcurl", - "Hcurl", - weights=["sqrt_g", "1/eq_n0", "Ginv"], - name="M1ninvswitch", - assemble=True, + "Hcurl", "Hcurl", weights=["sqrt_g", "1/eq_n0", "Ginv"], name="M1ninvswitch", assemble=True ).dot(x1_psy, apply_bc=True) rot_B = RotationMatrix( mass_mats_free.weights[mass_mats_free.selected_weight].b2_1, @@ -276,11 +263,7 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): ) rM1Bninvswitch_fre = mass_mats_free.create_weighted_mass( - "Hcurl", - "Hcurl", - weights=["1/eq_n0", "sqrt_g", "Ginv", rot_B, "Ginv"], - name="M1Bninvswitch", - assemble=True, + "Hcurl", "Hcurl", weights=["1/eq_n0", "sqrt_g", "Ginv", rot_B, "Ginv"], name="M1Bninvswitch", assemble=True ).dot(x1_psy, apply_bc=True) # compare output arrays @@ -386,13 +369,12 @@ def test_mass(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) @pytest.mark.parametrize( "dirichlet_bc", - [None, [(False, True), (False, False), (False, True)], [(False, False), (False, False), (True, False)]], + [None, [[False, True], [False, False], [False, True]], [[False, False], [False, False], [True, False]]], ) @pytest.mark.parametrize("mapping", [["IGAPolarCylinder", {"a": 1.0, "Lz": 3.0}]]) def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): """Compare Struphy polar mass matrices to Struphy-legacy polar 
mass matrices.""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.mhd_operators import MHDOperators @@ -403,6 +385,7 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): from struphy.fields_background.equils import ScrewPinch from struphy.geometry import domains from struphy.polar.basic import PolarVector + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -436,7 +419,7 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -448,22 +431,13 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): if dirichlet_bc is not None: for i, knd in enumerate(spl_kind): if knd: - dirichlet_bc[i] = (False, False) + dirichlet_bc[i] = [False, False] else: - dirichlet_bc = [(False, False)] * 3 - - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[False, False]] * 3 # derham object derham = Derham( - Nel, - p, - spl_kind, - comm=mpi_comm, - dirichlet_bc=dirichlet_bc, - with_projectors=False, - polar_ck=1, - domain=domain, + Nel, p, spl_kind, comm=mpi_comm, dirichlet_bc=dirichlet_bc, with_projectors=False, polar_ck=1, domain=domain ) print(f"Rank {mpi_rank} | Local domain : " + str(derham.domain_array[mpi_rank])) @@ -522,11 +496,11 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): x2_pol_psy.tp = x2_psy x3_pol_psy.tp = x3_psy - xp.random.seed(1607) - x0_pol_psy.pol = [xp.random.rand(x0_pol_psy.pol[0].shape[0], x0_pol_psy.pol[0].shape[1])] - x1_pol_psy.pol = [xp.random.rand(x1_pol_psy.pol[n].shape[0], x1_pol_psy.pol[n].shape[1]) for n in range(3)] - x2_pol_psy.pol = [xp.random.rand(x2_pol_psy.pol[n].shape[0], x2_pol_psy.pol[n].shape[1]) for n in range(3)] - x3_pol_psy.pol = [xp.random.rand(x3_pol_psy.pol[0].shape[0], x3_pol_psy.pol[0].shape[1])] + np.random.seed(1607) + x0_pol_psy.pol = [np.random.rand(x0_pol_psy.pol[0].shape[0], x0_pol_psy.pol[0].shape[1])] + x1_pol_psy.pol = [np.random.rand(x1_pol_psy.pol[n].shape[0], x1_pol_psy.pol[n].shape[1]) for n in range(3)] + x2_pol_psy.pol = [np.random.rand(x2_pol_psy.pol[n].shape[0], x2_pol_psy.pol[n].shape[1]) for n in range(3)] + x3_pol_psy.pol = [np.random.rand(x3_pol_psy.pol[0].shape[0], x3_pol_psy.pol[0].shape[1])] # apply boundary conditions to old STRUPHY x0_pol_str = x0_pol_psy.toarray(True) @@ -556,12 +530,12 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): rn_pol_psy = mass_mats.M2n.dot(x2_pol_psy, apply_bc=True) rJ_pol_psy = mass_mats.M2J.dot(x2_pol_psy, apply_bc=True) - assert xp.allclose(r0_pol_str, r0_pol_psy.toarray(True)) - assert xp.allclose(r1_pol_str, r1_pol_psy.toarray(True)) - assert xp.allclose(r2_pol_str, r2_pol_psy.toarray(True)) - assert xp.allclose(r3_pol_str, r3_pol_psy.toarray(True)) - assert xp.allclose(rn_pol_str, rn_pol_psy.toarray(True)) - assert xp.allclose(rJ_pol_str, rJ_pol_psy.toarray(True)) + assert np.allclose(r0_pol_str, r0_pol_psy.toarray(True)) + assert np.allclose(r1_pol_str, r1_pol_psy.toarray(True)) + assert np.allclose(r2_pol_str, r2_pol_psy.toarray(True)) + assert np.allclose(r3_pol_str, r3_pol_psy.toarray(True)) + assert np.allclose(rn_pol_str, rn_pol_psy.toarray(True)) + assert np.allclose(rJ_pol_str, rJ_pol_psy.toarray(True)) # perfrom matrix-vector products (without boundary conditions) r0_pol_str = space.M0(x0_pol_str) @@ -574,12 +548,12 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, 
show_plots=False): r2_pol_psy = mass_mats.M2.dot(x2_pol_psy, apply_bc=False) r3_pol_psy = mass_mats.M3.dot(x3_pol_psy, apply_bc=False) - assert xp.allclose(r0_pol_str, r0_pol_psy.toarray(True)) - assert xp.allclose(r1_pol_str, r1_pol_psy.toarray(True)) - assert xp.allclose(r2_pol_str, r2_pol_psy.toarray(True)) - assert xp.allclose(r3_pol_str, r3_pol_psy.toarray(True)) - assert xp.allclose(rn_pol_str, rn_pol_psy.toarray(True)) - assert xp.allclose(rJ_pol_str, rJ_pol_psy.toarray(True)) + assert np.allclose(r0_pol_str, r0_pol_psy.toarray(True)) + assert np.allclose(r1_pol_str, r1_pol_psy.toarray(True)) + assert np.allclose(r2_pol_str, r2_pol_psy.toarray(True)) + assert np.allclose(r3_pol_str, r3_pol_psy.toarray(True)) + assert np.allclose(rn_pol_str, rn_pol_psy.toarray(True)) + assert np.allclose(rJ_pol_str, rJ_pol_psy.toarray(True)) print(f"Rank {mpi_rank} | All tests passed!") @@ -589,7 +563,7 @@ def test_mass_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) @pytest.mark.parametrize( "dirichlet_bc", - [None, [(False, True), (False, False), (False, True)], [(False, False), (False, False), (True, False)]], + [None, [[False, True], [False, False], [False, True]], [[False, False], [False, False], [True, False]]], ) @pytest.mark.parametrize("mapping", [["HollowCylinder", {"a1": 0.1, "a2": 1.0, "Lz": 18.84955592153876}]]) def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): @@ -598,7 +572,6 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots import time - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.solvers import inverse @@ -608,6 +581,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots from struphy.feec.utilities import create_equal_random_arrays from struphy.fields_background.equils import ScrewPinch, ShearedSlab from struphy.geometry import domains + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -634,7 +608,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots eq_mhd = ShearedSlab( **{ "a": (mapping[1]["r1"] - mapping[1]["l1"]), - "R0": (mapping[1]["r3"] - mapping[1]["l3"]) / (2 * xp.pi), + "R0": (mapping[1]["r3"] - mapping[1]["l3"]) / (2 * np.pi), "B0": 1.0, "q0": 1.05, "q1": 1.8, @@ -642,14 +616,14 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) elif mapping[0] == "Colella": eq_mhd = ShearedSlab( **{ "a": mapping[1]["Lx"], - "R0": mapping[1]["Lz"] / (2 * xp.pi), + "R0": mapping[1]["Lz"] / (2 * np.pi), "B0": 1.0, "q0": 1.05, "q1": 1.8, @@ -657,7 +631,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -675,7 +649,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -687,11 +661,9 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots if dirichlet_bc is not None: for i, knd in enumerate(spl_kind): if knd: - dirichlet_bc[i] = (False, False) + dirichlet_bc[i] = [False, False] else: - dirichlet_bc = [(False, False)] * 3 - - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[False, False]] * 3 # derham object derham = Derham(Nel, p, spl_kind, comm=mpi_comm, 
dirichlet_bc=dirichlet_bc) @@ -774,27 +746,27 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots print("Done") # compare output arrays - assert xp.allclose(r0.toarray(), r0_pre.toarray()) - assert xp.allclose(r1.toarray(), r1_pre.toarray()) - assert xp.allclose(r2.toarray(), r2_pre.toarray()) - assert xp.allclose(r3.toarray(), r3_pre.toarray()) - assert xp.allclose(rv.toarray(), rv_pre.toarray()) + assert np.allclose(r0.toarray(), r0_pre.toarray()) + assert np.allclose(r1.toarray(), r1_pre.toarray()) + assert np.allclose(r2.toarray(), r2_pre.toarray()) + assert np.allclose(r3.toarray(), r3_pre.toarray()) + assert np.allclose(rv.toarray(), rv_pre.toarray()) - assert xp.allclose(r1n.toarray(), r1n_pre.toarray()) - assert xp.allclose(r2n.toarray(), r2n_pre.toarray()) - assert xp.allclose(rvn.toarray(), rvn_pre.toarray()) + assert np.allclose(r1n.toarray(), r1n_pre.toarray()) + assert np.allclose(r2n.toarray(), r2n_pre.toarray()) + assert np.allclose(rvn.toarray(), rvn_pre.toarray()) - assert xp.allclose(r1Bninv.toarray(), r1Bninv_pre.toarray()) - assert xp.allclose(r1Bninv.toarray(), r1Bninvold_pre.toarray()) - assert xp.allclose(r1Bninvold.toarray(), r1Bninv_pre.toarray()) + assert np.allclose(r1Bninv.toarray(), r1Bninv_pre.toarray()) + assert np.allclose(r1Bninv.toarray(), r1Bninvold_pre.toarray()) + assert np.allclose(r1Bninvold.toarray(), r1Bninv_pre.toarray()) # test if preconditioner satisfies PC * M = Identity if mapping[0] == "Cuboid" or mapping[0] == "HollowCylinder": - assert xp.allclose(mass_mats.M0.dot(M0pre.solve(x0)).toarray(), derham.boundary_ops["0"].dot(x0).toarray()) - assert xp.allclose(mass_mats.M1.dot(M1pre.solve(x1)).toarray(), derham.boundary_ops["1"].dot(x1).toarray()) - assert xp.allclose(mass_mats.M2.dot(M2pre.solve(x2)).toarray(), derham.boundary_ops["2"].dot(x2).toarray()) - assert xp.allclose(mass_mats.M3.dot(M3pre.solve(x3)).toarray(), derham.boundary_ops["3"].dot(x3).toarray()) - assert xp.allclose(mass_mats.Mv.dot(Mvpre.solve(xv)).toarray(), derham.boundary_ops["v"].dot(xv).toarray()) + assert np.allclose(mass_mats.M0.dot(M0pre.solve(x0)).toarray(), derham.boundary_ops["0"].dot(x0).toarray()) + assert np.allclose(mass_mats.M1.dot(M1pre.solve(x1)).toarray(), derham.boundary_ops["1"].dot(x1).toarray()) + assert np.allclose(mass_mats.M2.dot(M2pre.solve(x2)).toarray(), derham.boundary_ops["2"].dot(x2).toarray()) + assert np.allclose(mass_mats.M3.dot(M3pre.solve(x3)).toarray(), derham.boundary_ops["3"].dot(x3).toarray()) + assert np.allclose(mass_mats.Mv.dot(Mvpre.solve(xv)).toarray(), derham.boundary_ops["v"].dot(xv).toarray()) # test preconditioner in iterative solver M0inv = inverse(mass_mats.M0, "pcg", pc=M0pre, tol=1e-8, maxiter=1000) @@ -896,7 +868,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) @pytest.mark.parametrize( "dirichlet_bc", - [None, [(False, True), (False, False), (False, True)], [(False, False), (False, False), (True, False)]], + [None, [[False, True], [False, False], [False, True]], [[False, False], [False, False], [True, False]]], ) @pytest.mark.parametrize("mapping", [["IGAPolarCylinder", {"a": 1.0, "Lz": 3.0}]]) def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): @@ -905,7 +877,6 @@ def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show import time - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from 
psydac.linalg.solvers import inverse @@ -916,6 +887,7 @@ def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show from struphy.fields_background.equils import ScrewPinch from struphy.geometry import domains from struphy.polar.basic import PolarVector + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -949,7 +921,7 @@ def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show "n2": 4.0, "na": 0.0, "beta": 0.1, - }, + } ) if show_plots: @@ -961,22 +933,13 @@ def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show if dirichlet_bc is not None: for i, knd in enumerate(spl_kind): if knd: - dirichlet_bc[i] = (False, False) + dirichlet_bc[i] = [False, False] else: - dirichlet_bc = [(False, False)] * 3 - - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[False, False]] * 3 # derham object derham = Derham( - Nel, - p, - spl_kind, - comm=mpi_comm, - dirichlet_bc=dirichlet_bc, - with_projectors=False, - polar_ck=1, - domain=domain, + Nel, p, spl_kind, comm=mpi_comm, dirichlet_bc=dirichlet_bc, with_projectors=False, polar_ck=1, domain=domain ) print(f"Rank {mpi_rank} | Local domain : " + str(derham.domain_array[mpi_rank])) @@ -1016,11 +979,11 @@ def test_mass_preconditioner_polar(Nel, p, spl_kind, dirichlet_bc, mapping, show x2_pol.tp = x2 x3_pol.tp = x3 - xp.random.seed(1607) - x0_pol.pol = [xp.random.rand(x0_pol.pol[0].shape[0], x0_pol.pol[0].shape[1])] - x1_pol.pol = [xp.random.rand(x1_pol.pol[n].shape[0], x1_pol.pol[n].shape[1]) for n in range(3)] - x2_pol.pol = [xp.random.rand(x2_pol.pol[n].shape[0], x2_pol.pol[n].shape[1]) for n in range(3)] - x3_pol.pol = [xp.random.rand(x3_pol.pol[0].shape[0], x3_pol.pol[0].shape[1])] + np.random.seed(1607) + x0_pol.pol = [np.random.rand(x0_pol.pol[0].shape[0], x0_pol.pol[0].shape[1])] + x1_pol.pol = [np.random.rand(x1_pol.pol[n].shape[0], x1_pol.pol[n].shape[1]) for n in range(3)] + x2_pol.pol = [np.random.rand(x2_pol.pol[n].shape[0], x2_pol.pol[n].shape[1]) for n in range(3)] + x3_pol.pol = [np.random.rand(x3_pol.pol[0].shape[0], x3_pol.pol[0].shape[1])] # test preconditioner in iterative solver and compare to case without preconditioner M0inv = inverse(mass_mats.M0, "pcg", pc=M0pre, tol=1e-8, maxiter=500) diff --git a/src/struphy/feec/tests/test_toarray_struphy.py b/src/struphy/feec/tests/test_toarray_struphy.py index 90427d8e4..c1d03249d 100644 --- a/src/struphy/feec/tests/test_toarray_struphy.py +++ b/src/struphy/feec/tests/test_toarray_struphy.py @@ -5,21 +5,20 @@ @pytest.mark.parametrize("p", [[3, 2, 1]]) @pytest.mark.parametrize("spl_kind", [[False, True, True], [True, False, False]]) @pytest.mark.parametrize( - "mapping", - [["Cuboid", {"l1": 1.0, "r1": 2.0, "l2": 10.0, "r2": 20.0, "l3": 100.0, "r3": 200.0}]], + "mapping", [["Cuboid", {"l1": 1.0, "r1": 2.0, "l2": 10.0, "r2": 20.0, "l3": 100.0, "r3": 200.0}]] ) def test_toarray_struphy(Nel, p, spl_kind, mapping): """ Compare matrix-vector products of the weighted mass operators with matmul products of their dense arrays from toarray_struphy, both out-of-place and in-place. """ - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.feec.mass import WeightedMassOperators from struphy.feec.psydac_derham import Derham from struphy.feec.utilities import compare_arrays, create_equal_random_arrays from struphy.geometry import domains + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -71,30 +70,30 @@ def test_toarray_struphy(Nel, p, spl_kind, mapping): v3arr = v3arr[0].flatten() # not in-place - compare_arrays(M0.dot(v0), xp.matmul(M0arr, v0arr), rank) -
compare_arrays(M1.dot(v1), xp.matmul(M1arr, v1arr), rank) - compare_arrays(M2.dot(v2), xp.matmul(M2arr, v2arr), rank) - compare_arrays(M3.dot(v3), xp.matmul(M3arr, v3arr), rank) + compare_arrays(M0.dot(v0), np.matmul(M0arr, v0arr), rank) + compare_arrays(M1.dot(v1), np.matmul(M1arr, v1arr), rank) + compare_arrays(M2.dot(v2), np.matmul(M2arr, v2arr), rank) + compare_arrays(M3.dot(v3), np.matmul(M3arr, v3arr), rank) # Now we test the in-place version - IM0 = xp.zeros([M0.codomain.dimension, M0.domain.dimension], dtype=M0.dtype) - IM1 = xp.zeros([M1.codomain.dimension, M1.domain.dimension], dtype=M1.dtype) - IM2 = xp.zeros([M2.codomain.dimension, M2.domain.dimension], dtype=M2.dtype) - IM3 = xp.zeros([M3.codomain.dimension, M3.domain.dimension], dtype=M3.dtype) + IM0 = np.zeros([M0.codomain.dimension, M0.domain.dimension], dtype=M0.dtype) + IM1 = np.zeros([M1.codomain.dimension, M1.domain.dimension], dtype=M1.dtype) + IM2 = np.zeros([M2.codomain.dimension, M2.domain.dimension], dtype=M2.dtype) + IM3 = np.zeros([M3.codomain.dimension, M3.domain.dimension], dtype=M3.dtype) M0.toarray_struphy(out=IM0) M1.toarray_struphy(out=IM1) M2.toarray_struphy(out=IM2) M3.toarray_struphy(out=IM3) - compare_arrays(M0.dot(v0), xp.matmul(IM0, v0arr), rank) - compare_arrays(M1.dot(v1), xp.matmul(IM1, v1arr), rank) - compare_arrays(M2.dot(v2), xp.matmul(IM2, v2arr), rank) - compare_arrays(M3.dot(v3), xp.matmul(IM3, v3arr), rank) + compare_arrays(M0.dot(v0), np.matmul(IM0, v0arr), rank) + compare_arrays(M1.dot(v1), np.matmul(IM1, v1arr), rank) + compare_arrays(M2.dot(v2), np.matmul(IM2, v2arr), rank) + compare_arrays(M3.dot(v3), np.matmul(IM3, v3arr), rank) print("test_toarray_struphy passed!") - # assert xp.allclose(out1.toarray(), v1.toarray(), atol=1e-5) + # assert np.allclose(out1.toarray(), v1.toarray(), atol=1e-5) if __name__ == "__main__": diff --git a/src/struphy/feec/tests/test_tosparse_struphy.py b/src/struphy/feec/tests/test_tosparse_struphy.py index 48cbfd7a2..e4dc75de9 100644 --- a/src/struphy/feec/tests/test_tosparse_struphy.py +++ b/src/struphy/feec/tests/test_tosparse_struphy.py @@ -7,15 +7,13 @@ @pytest.mark.parametrize("p", [[3, 2, 1]]) @pytest.mark.parametrize("spl_kind", [[False, True, True], [True, False, False]]) @pytest.mark.parametrize( - "mapping", - [["Cuboid", {"l1": 1.0, "r1": 2.0, "l2": 10.0, "r2": 20.0, "l3": 100.0, "r3": 200.0}]], + "mapping", [["Cuboid", {"l1": 1.0, "r1": 2.0, "l2": 10.0, "r2": 20.0, "l3": 100.0, "r3": 200.0}]] ) def test_tosparse_struphy(Nel, p, spl_kind, mapping): """ Compare matrix-vector products of the weighted mass operators with products of their sparse-matrix representations. """ - import cunumpy as xp from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI @@ -23,6 +21,7 @@ def test_tosparse_struphy(Nel, p, spl_kind, mapping): from struphy.feec.mass import WeightedMassOperators from struphy.feec.psydac_derham import Derham from struphy.feec.utilities import create_equal_random_arrays from struphy.geometry import domains + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -103,39 +102,27 @@ def test_tosparse_struphy(Nel, p, spl_kind, mapping): comm.Allreduce(v3_local, v3_global, op=MPI.SUM) # not in-place - assert xp.allclose(v0_global, M0arr.dot(v0arr)) - assert xp.allclose(v1_global, M1arr.dot(v1arr)) - assert xp.allclose(v2_global, M2arr.dot(v2arr)) - assert xp.allclose(v3_global, M3arr.dot(v3arr)) - assert xp.allclose(v0_global, M0arrad.dot(v0arr)) - assert xp.allclose(v1_global, M1arrad.dot(v1arr)) - assert xp.allclose(v2_global, M2arrad.dot(v2arr)) + assert np.allclose(v0_global, M0arr.dot(v0arr)) + assert np.allclose(v1_global, M1arr.dot(v1arr)) + assert
np.allclose(v2_global, M2arr.dot(v2arr)) + assert np.allclose(v3_global, M3arr.dot(v3arr)) + assert np.allclose(v0_global, M0arrad.dot(v0arr)) + assert np.allclose(v1_global, M1arrad.dot(v1arr)) + assert np.allclose(v2_global, M2arrad.dot(v2arr)) print("test_tosparse_struphy passed!") if __name__ == "__main__": test_tosparse_struphy( - [32, 2, 2], - [2, 1, 1], - [True, True, True], - ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}], + [32, 2, 2], [2, 1, 1], [True, True, True], ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}] ) test_tosparse_struphy( - [2, 32, 2], - [1, 2, 1], - [True, True, True], - ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}], + [2, 32, 2], [1, 2, 1], [True, True, True], ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}] ) test_tosparse_struphy( - [2, 2, 32], - [1, 1, 2], - [True, True, True], - ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}], + [2, 2, 32], [1, 1, 2], [True, True, True], ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}] ) test_tosparse_struphy( - [2, 2, 32], - [1, 1, 2], - [False, False, False], - ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}], + [2, 2, 32], [1, 1, 2], [False, False, False], ["Colella", {"Lx": 1.0, "Ly": 2.0, "alpha": 0.5, "Lz": 3.0}] ) diff --git a/src/struphy/feec/tests/xx_test_preconds.py b/src/struphy/feec/tests/xx_test_preconds.py index 267e0279a..a5e14b8d7 100644 --- a/src/struphy/feec/tests/xx_test_preconds.py +++ b/src/struphy/feec/tests/xx_test_preconds.py @@ -12,7 +12,6 @@ ], ) def test_mass_preconditioner(Nel, p, spl_kind, mapping): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilVector @@ -22,6 +21,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, mapping): from struphy.feec.preconditioner import MassMatrixPreconditioner from struphy.feec.psydac_derham import Derham from struphy.geometry import domains + from struphy.utils.arrays import xp as np MPI_COMM = MPI.COMM_WORLD @@ -40,22 +40,22 @@ def test_mass_preconditioner(Nel, p, spl_kind, mapping): v = [] v += [StencilVector(derham.V0.coeff_space)] - v[-1]._data = xp.random.rand(*v[-1]._data.shape) + v[-1]._data = np.random.rand(*v[-1]._data.shape) v += [BlockVector(derham.V1.coeff_space)] for v1i in v[-1]: - v1i._data = xp.random.rand(*v1i._data.shape) + v1i._data = np.random.rand(*v1i._data.shape) v += [BlockVector(derham.V2.coeff_space)] for v1i in v[-1]: - v1i._data = xp.random.rand(*v1i._data.shape) + v1i._data = np.random.rand(*v1i._data.shape) v += [StencilVector(derham.V3.coeff_space)] - v[-1]._data = xp.random.rand(*v[-1]._data.shape) + v[-1]._data = np.random.rand(*v[-1]._data.shape) v += [BlockVector(derham.V0vec.coeff_space)] for v1i in v[-1]: - v1i._data = xp.random.rand(*v1i._data.shape) + v1i._data = np.random.rand(*v1i._data.shape) # assemble preconditioners M_pre = [] @@ -68,7 +68,7 @@ def test_mass_preconditioner(Nel, p, spl_kind, mapping): n = "v" if domain.kind_map == 10 or domain.kind_map == 11: - assert xp.allclose(M._mat.toarray(), M_p.matrix.toarray()) + assert np.allclose(M._mat.toarray(), M_p.matrix.toarray()) print(f'Matrix assertion for space {n} case "Cuboid/HollowCylinder" passed.') inv_A = InverseLinearOperator(M, pc=M_p, tol=1e-8, maxiter=5000) diff --git a/src/struphy/feec/utilities.py b/src/struphy/feec/utilities.py index 2fffe38e3..4ceb7842a 100644 --- a/src/struphy/feec/utilities.py +++ b/src/struphy/feec/utilities.py @@ -1,14 +1,13 @@ -import cunumpy as xp 
from psydac.api.essential_bc import apply_essential_bc_stencil from psydac.fem.tensor import TensorFemSpace from psydac.fem.vector import VectorFemSpace -from psydac.linalg.basic import Vector from psydac.linalg.block import BlockLinearOperator, BlockVector from psydac.linalg.stencil import StencilMatrix, StencilVector import struphy.feec.utilities_kernels as kernels from struphy.feec import banded_to_stencil_kernels as bts from struphy.polar.basic import PolarVector +from struphy.utils.arrays import xp as np class RotationMatrix: @@ -41,15 +40,15 @@ def __init__(self, *vec_fun): def __call__(self, e1, e2, e3): # array from 2d list gives a 3x3 array in the first two indices - tmp = xp.array( + tmp = np.array( [ [self._cross_mask[m][n] * fun(e1, e2, e3) for n, fun in enumerate(row)] for m, row in enumerate(self._funs) - ], + ] ) # numpy operates on the last two indices with @ - return xp.transpose(tmp, axes=(2, 3, 4, 0, 1)) + return np.transpose(tmp, axes=(2, 3, 4, 0, 1)) def create_equal_random_arrays(V, seed=123, flattened=False): @@ -77,7 +76,7 @@ def create_equal_random_arrays(V, seed=123, flattened=False): assert isinstance(V, (TensorFemSpace, VectorFemSpace)) - xp.random.seed(seed) + np.random.seed(seed) arr = [] @@ -93,15 +92,13 @@ def create_equal_random_arrays(V, seed=123, flattened=False): dims = V.coeff_space.npts - arr += [xp.random.rand(*dims)] + arr += [np.random.rand(*dims)] s = arr_psy.starts e = arr_psy.ends arr_psy[s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1] = arr[-1][ - s[0] : e[0] + 1, - s[1] : e[1] + 1, - s[2] : e[2] + 1, + s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1 ] if flattened: @@ -113,24 +110,22 @@ def create_equal_random_arrays(V, seed=123, flattened=False): for d, block in enumerate(arr_psy.blocks): dims = V.spaces[d].coeff_space.npts - arr += [xp.random.rand(*dims)] + arr += [np.random.rand(*dims)] s = block.starts e = block.ends arr_psy[d][s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1] = arr[-1][ - s[0] : e[0] + 1, - s[1] : e[1] + 1, - s[2] : e[2] + 1, + s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1 ] if flattened: - arr = xp.concatenate( + arr = np.concatenate( ( arr[0].flatten(), arr[1].flatten(), arr[2].flatten(), - ), + ) ) arr_psy.update_ghost_regions() @@ -172,11 +167,11 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): arr_psy.space.npts[2], )[s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1] - assert xp.allclose(tmp1, tmp2, atol=atol) + assert np.allclose(tmp1, tmp2, atol=atol) elif isinstance(arr_psy, BlockVector): if not (isinstance(arr, tuple) or isinstance(arr, list)): - arrs = xp.split( + arrs = np.split( arr, [ arr_psy.blocks[0].shape[0], @@ -201,7 +196,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): s[2] : e[2] + 1, ] - assert xp.allclose(tmp1, tmp2, atol=atol) + assert np.allclose(tmp1, tmp2, atol=atol) elif isinstance(arr_psy, StencilMatrix): s = arr_psy.codomain.starts @@ -220,7 +215,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): if tmp_arr.shape == tmp1.shape: tmp2 = tmp_arr else: - tmp2 = xp.zeros( + tmp2 = np.zeros( ( e[0] + 1 - s[0], e[1] + 1 - s[1], @@ -233,7 +228,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): ) bts.band_to_stencil_3d(tmp_arr, tmp2) - assert xp.allclose(tmp1, tmp2, atol=atol) + assert np.allclose(tmp1, tmp2, atol=atol) elif isinstance(arr_psy, BlockLinearOperator): for row_psy, row in zip(arr_psy.blocks, arr): @@ -264,7 +259,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): if
tmp_mat.shape == tmp1.shape: tmp2 = tmp_mat else: - tmp2 = xp.zeros( + tmp2 = np.zeros( ( e[0] + 1 - s[0], e[1] + 1 - s[1], @@ -277,7 +272,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): ) bts.band_to_stencil_3d(tmp_mat, tmp2) - assert xp.allclose(tmp1, tmp2, atol=atol) + assert np.allclose(tmp1, tmp2, atol=atol) else: raise AssertionError("Wrong input type.") @@ -288,7 +283,7 @@ def compare_arrays(arr_psy, arr, rank, atol=1e-14, verbose=False): ) -def apply_essential_bc_to_array(space_id: str, vector: Vector, bc: tuple): +def apply_essential_bc_to_array(space_id, vector, bc): """ Sets entries corresponding to boundary B-splines to zero. @@ -297,15 +292,15 @@ def apply_essential_bc_to_array(space_id: str, vector: Vector, bc: tuple): space_id : str The name of the continuous functions space the given vector belongs to (H1, Hcurl, Hdiv, L2 or H1vec). - vector : Vector + vector : StencilVector | BlockVector The vector whose boundary values shall be set to zero. - bc : tuple[tuple[bool]] + bc : list[list[bool]] Whether to apply homogeneous Dirichlet boundary conditions (at left or right boundary in each direction). """ assert isinstance(vector, (StencilVector, BlockVector, PolarVector)) - assert isinstance(bc, tuple) + assert isinstance(bc, list) assert len(bc) == 3 if isinstance(vector, PolarVector): diff --git a/src/struphy/feec/utilities_local_projectors.py b/src/struphy/feec/utilities_local_projectors.py index 3ce6590f2..5aa9a5b61 100644 --- a/src/struphy/feec/utilities_local_projectors.py +++ b/src/struphy/feec/utilities_local_projectors.py @@ -1,6 +1,5 @@ -import cunumpy as xp - from struphy.feec.local_projectors_kernels import are_quadrature_points_zero, get_rows, select_quasi_points +from struphy.utils.arrays import xp as np def split_points( @@ -33,7 +32,7 @@ def split_points( shifts : 1d int array For each one of the three spatial directions it determines by which amount to shift the position index (pos) in case we have to loop over the evaluation points. - pts : list of xp.array + pts : list of np.array 3D list of 2D array with the quasi-interpolation points (or Gauss-Legendre quadrature points for histopolation). In format (ns, nb, np) = (spatial direction, B-spline index, point) for StencilVector spaces . @@ -50,7 +49,7 @@ def split_points( npts : list of ints Contains the number of B-splines for each one of the three spatial directions. - periodic : 1D bool xp.array + periodic : 1D bool np.array For each one of the three spatial directions contains the information of whether the B-splines are periodic or not. wij: 3d float array @@ -75,18 +74,18 @@ def split_points( """ # We iterate over the three spatial directions for n, pt in enumerate(pts): - original_pts_size[n] = xp.shape(pt)[0] + original_pts_size[n] = np.shape(pt)[0] # We initialize localpts with as many entries as the global pt, but with all entries being -1 # This function will change the values of the needed entries from -1 to the value of the point. 
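# For example, for interpolation ("I") with five global points of which only points 1 and 3 are needed by this rank, localpts goes from [-1, -1, -1, -1, -1] to [-1, pt[1], -1, pt[3], -1]; the remaining -1 sentinels are filtered out below with np.where (evaluation points live in [0, 1], so -1 cannot collide with a real point).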
if IoH[n] == "I": - localpts = xp.full((xp.shape(pt)[0]), fill_value=-1, dtype=float) + localpts = np.full((np.shape(pt)[0]), fill_value=-1, dtype=float) elif IoH[n] == "H": - localpts = xp.full((xp.shape(pt)), fill_value=-1, dtype=float) + localpts = np.full((np.shape(pt)), fill_value=-1, dtype=float) for i in range(starts[n], ends[n] + 1): startj1, endj1 = select_quasi_points(int(i), int(p[n]), int(npts[n]), bool(periodic[n])) for j1 in range(lenj[n]): - if startj1 + j1 < xp.shape(pt)[0]: + if startj1 + j1 < np.shape(pt)[0]: pos = startj1 + j1 else: pos = int(startj1 + j1 + shift[n]) @@ -98,42 +97,42 @@ def split_points( localpts[pos] = pt[pos] # We get the local points by grabing only the values different from -1. if IoH[n] == "I": - localpos = xp.where(localpts != -1)[0] + localpos = np.where(localpts != -1)[0] elif IoH[n] == "H": - localpos = xp.where(localpts[:, 0] != -1)[0] + localpos = np.where(localpts[:, 0] != -1)[0] localpts = localpts[localpos] - localptsout.append(xp.array(localpts)) + localptsout.append(np.array(localpts)) ## # We build the index_translation array that shall turn global indices into local indices ## - mini_indextranslation = xp.full( - (xp.shape(pt)[0]), + mini_indextranslation = np.full( + (np.shape(pt)[0]), fill_value=-1, dtype=int, ) for i, j in enumerate(localpos): mini_indextranslation[j] = i - index_translation.append(xp.array(mini_indextranslation)) + index_translation.append(np.array(mini_indextranslation)) ## # We build the inv_index_translation that shall turn local indices into global indices ## - inv_mini_indextranslation = xp.full( - (xp.shape(localptsout[-1])[0]), + inv_mini_indextranslation = np.full( + (np.shape(localptsout[-1])[0]), fill_value=-1, dtype=int, ) for i, j in enumerate(localpos): inv_mini_indextranslation[i] = j - inv_index_translation.append(xp.array(inv_mini_indextranslation)) + inv_index_translation.append(np.array(inv_mini_indextranslation)) def get_values_and_indices_splines(Nbasis, degree, periodic, spans, values): - """Given an array with the values of the splines evaluated at certain points this function returns a xp.array that tell us the index of each spline. So we can know to which spline each + """Given an array with the values of the splines evaluated at certain points this function returns a np.array that tell us the index of each spline. So we can know to which spline each value corresponds. It also modifies the evaluation values in the case we have one spline of degree one with periodic boundary conditions, so it is artificially equal to the identity. Parameters @@ -147,31 +146,31 @@ def get_values_and_indices_splines(Nbasis, degree, periodic, spans, values): periodic : bool Whether we have periodic boundary conditions or nor. - span : xp.array + span : np.array 2d array indexed by (n, nq), where n is the interval and nq is the quadrature point in the interval. - values : xp.array + values : np.array 3d array of values of basis functions indexed by (n, nq, basis function). Returns ------- - eval_indeces : xp.array + eval_indeces : np.array 3d array of basis functions indices, indexed by (n, nq, basis function). - values : xp.array + values : np.array 3d array of values of basis functions indexed by (n, nq, basis function). """ # In this case we want this spatial direction to be "neglected", that means we artificially set the values of the B-spline to 1 at all points. So it becomes the multiplicative identity. 
if Nbasis == 1 and degree == 1 and periodic: # Set all values to 1 for the identity case - values = xp.ones((values.shape[0], values.shape[1], 1)) - eval_indeces = xp.zeros_like(values, dtype=int) + values = np.ones((values.shape[0], values.shape[1], 1)) + eval_indeces = np.zeros_like(values, dtype=int) else: - eval_indeces = xp.zeros_like(values, dtype=int) - for i in range(xp.shape(spans)[0]): - for k in range(xp.shape(spans)[1]): + eval_indeces = np.zeros_like(values, dtype=int) + for i in range(np.shape(spans)[0]): + for k in range(np.shape(spans)[1]): for j in range(degree + 1): eval_indeces[i, k, j] = (spans[i][k] - degree + j) % Nbasis @@ -180,31 +179,31 @@ def get_values_and_indices_splines(Nbasis, degree, periodic, spans, values): def get_one_spline(a, values, eval_indeces): """Given the spline index, an array with the splines evaluated at the evaluation points and another array with the indices indicating to which spline each value corresponds, this function returns - a 1d xp.array with the desired spline evaluated at all evaluation points. + a 1d np.array with the desired spline evaluated at all evaluation points. Parameters ---------- a : int Spline index - values : xp.array + values : np.array 3d array of values of basis functions indexed by (n, nq, basis function). - eval_indeces : xp.array + eval_indeces : np.array 3d array of basis functions indices, indexed by (n, nq, basis function). Returns ------- - my_values : xp.array + my_values : np.array 1d array of values for the spline evaluated at all evaluation points. """ - my_values = xp.zeros(xp.shape(values)[0] * xp.shape(values)[1]) - for i in range(xp.shape(values)[0]): - for j in range(xp.shape(values)[1]): - for k in range(xp.shape(values)[2]): + my_values = np.zeros(np.shape(values)[0] * np.shape(values)[1]) + for i in range(np.shape(values)[0]): + for j in range(np.shape(values)[1]): + for k in range(np.shape(values)[2]): if eval_indeces[i, j, k] == a: - my_values[i * xp.shape(values)[1] + j] = values[i, j, k] + my_values[i * np.shape(values)[1] + j] = values[i, j, k] break return my_values @@ -214,7 +213,7 @@ def get_span_and_basis(pts, space): Parameters ---------- - pts : xp.array + pts : np.array 2d array of points (ii, iq) = (interval, quadrature point). space : SplineSpace @@ -222,10 +221,10 @@ def get_span_and_basis(pts, space): Returns ------- - span : xp.array + span : np.array 2d array indexed by (n, nq), where n is the interval and nq is the quadrature point in the interval. - basis : xp.array + basis : np.array 3d array of values of basis functions indexed by (n, nq, basis function). 
""" @@ -235,8 +234,8 @@ def get_span_and_basis(pts, space): T = space.knots p = space.degree - span = xp.zeros(pts.shape, dtype=int) - basis = xp.zeros((*pts.shape, p + 1), dtype=float) + span = np.zeros(pts.shape, dtype=int) + basis = np.zeros((*pts.shape, p + 1), dtype=float) for n in range(pts.shape[0]): for nq in range(pts.shape[1]): @@ -464,7 +463,7 @@ def get_non_zero_B_spline_indices(periodic, IoH, p, B_nbasis, starts, ends, Basi stuck, ) - Basis_functions_indices_B.append(xp.array(aux_indices)) + Basis_functions_indices_B.append(np.array(aux_indices)) def get_non_zero_D_spline_indices(periodic, IoH, p, D_nbasis, starts, ends, Basis_functions_indices_D): @@ -522,7 +521,7 @@ def get_non_zero_D_spline_indices(periodic, IoH, p, D_nbasis, starts, ends, Basi stuck, ) - Basis_functions_indices_D.append(xp.array(aux_indices)) + Basis_functions_indices_D.append(np.array(aux_indices)) def build_translation_list_for_non_zero_spline_indices( @@ -584,18 +583,18 @@ def build_translation_list_for_non_zero_spline_indices( """ translation_indices_B_or_D_splines = [ { - "B": xp.full((B_nbasis[h]), fill_value=-1, dtype=int), - "D": xp.full((D_nbasis[h]), fill_value=-1, dtype=int), + "B": np.full((B_nbasis[h]), fill_value=-1, dtype=int), + "D": np.full((D_nbasis[h]), fill_value=-1, dtype=int), } for h in range(3) ] for h in range(3): - translation_indices_B_or_D_splines[h]["B"][Basis_functions_indices_B[h]] = xp.arange( - len(Basis_functions_indices_B[h]), + translation_indices_B_or_D_splines[h]["B"][Basis_functions_indices_B[h]] = np.arange( + len(Basis_functions_indices_B[h]) ) - translation_indices_B_or_D_splines[h]["D"][Basis_functions_indices_D[h]] = xp.arange( - len(Basis_functions_indices_D[h]), + translation_indices_B_or_D_splines[h]["D"][Basis_functions_indices_D[h]] = np.arange( + len(Basis_functions_indices_D[h]) ) if sp_id in {"Hcurl", "Hdiv", "H1vec"}: @@ -610,11 +609,7 @@ def build_translation_list_for_non_zero_spline_indices( def evaluate_relevant_splines_at_relevant_points( - localpts, - Bspaces_1d, - Dspaces_1d, - Basis_functions_indices_B, - Basis_functions_indices_D, + localpts, Bspaces_1d, Dspaces_1d, Basis_functions_indices_B, Basis_functions_indices_D ): """This function evaluates all the B and D-splines that produce non-zeros in the BasisProjectionOperatorLocal's rows that belong to the current MPI rank over all the local evaluation points. They are store as float arrays in a dictionary of lists. @@ -658,7 +653,7 @@ def evaluate_relevant_splines_at_relevant_points( for h in range(3): # Reshape localpts[h] if necessary localpts_reshaped = ( - localpts[h].reshape((xp.shape(localpts[h])[0], 1)) if len(xp.shape(localpts[h])) == 1 else localpts[h] + localpts[h].reshape((np.shape(localpts[h])[0], 1)) if len(np.shape(localpts[h])) == 1 else localpts[h] ) # Get spans and evaluation values for B-splines and D-splines @@ -697,15 +692,7 @@ def evaluate_relevant_splines_at_relevant_points( def determine_non_zero_rows_for_each_spline( - Basis_functions_indices_B, - Basis_functions_indices_D, - starts, - ends, - p, - B_nbasis, - D_nbasis, - periodic, - IoH, + Basis_functions_indices_B, Basis_functions_indices_D, starts, ends, p, B_nbasis, D_nbasis, periodic, IoH ): """This function determines for which rows (amongst those belonging to the current MPI rank) of the BasisProjectionOperatorLocal each B and D spline, of relevance for the current MPI rank, produces non-zero entries and annotates this regions of non-zeros by saving the rows at which each region starts and ends. 
@@ -772,7 +759,7 @@ def determine_non_zero_rows_for_each_spline( def process_splines(indices, nbasis, is_D, h): for i in indices[h]: - aux = xp.zeros((ends[h] + 1 - starts[h]), dtype=int) + aux = np.zeros((ends[h] + 1 - starts[h]), dtype=int) get_rows( int(i), int(starts[h]), @@ -786,8 +773,8 @@ def process_splines(indices, nbasis, is_D, h): ) rangestart, rangeend = transform_into_ranges(aux) key = "D" if is_D else "B" - rows_B_or_D_splines[h][key].append(xp.array(rangestart, dtype=int)) - rowe_B_or_D_splines[h][key].append(xp.array(rangeend, dtype=int)) + rows_B_or_D_splines[h][key].append(np.array(rangestart, dtype=int)) + rowe_B_or_D_splines[h][key].append(np.array(rangeend, dtype=int)) for h in range(3): process_splines(Basis_functions_indices_B, B_nbasis, False, h) @@ -804,8 +791,7 @@ def process_splines(indices, nbasis, is_D, h): def get_splines_that_are_relevant_for_at_least_one_block( - Basis_function_indices_agreggated_B, - Basis_function_indices_agreggated_D, + Basis_function_indices_agreggated_B, Basis_function_indices_agreggated_D ): """This function builds one list with all the B-spline indices (and another one for the D-splines) that are required for at least one block of the FE coefficients the current MPI rank needs to build its share of the BasisProjectionOperatorLocal. @@ -904,7 +890,7 @@ def is_spline_zero_at_quadrature_points( for h in range(3): if necessary_direction[h]: for i in Basis_functions_indices_B[h]: - Auxiliar = xp.ones((xp.shape(localpts[h])[0]), dtype=int) + Auxiliar = np.ones((np.shape(localpts[h])[0]), dtype=int) are_quadrature_points_zero( Auxiliar, int( @@ -915,7 +901,7 @@ def is_spline_zero_at_quadrature_points( are_zero_B_or_D_splines[h]["B"].append(Auxiliar) for i in Basis_functions_indices_D[h]: - Auxiliar = xp.ones((xp.shape(localpts[h])[0]), dtype=int) + Auxiliar = np.ones((np.shape(localpts[h])[0]), dtype=int) are_quadrature_points_zero( Auxiliar, int( diff --git a/src/struphy/feec/variational_utilities.py b/src/struphy/feec/variational_utilities.py index 8174a1a5b..61773122e 100644 --- a/src/struphy/feec/variational_utilities.py +++ b/src/struphy/feec/variational_utilities.py @@ -1,6 +1,5 @@ from copy import deepcopy -import cunumpy as xp from psydac.linalg.basic import IdentityOperator, Vector from psydac.linalg.block import BlockVector from psydac.linalg.solvers import inverse @@ -13,6 +12,7 @@ ) from struphy.feec.linear_operators import LinOpWithTransp from struphy.feec.psydac_derham import Derham +from struphy.utils.arrays import xp as np class BracketOperator(LinOpWithTransp): @@ -94,7 +94,7 @@ def __init__( self.Pcoord3 = CoordinateProjector(2, derham.Vh_pol["v"], derham.Vh_pol["0"]) @ derham.boundary_ops["v"] # Initialize the BasisProjectionOperators - if derham._with_local_projectors: + if derham._with_local_projectors == True: self.PiuT = BasisProjectionOperatorLocal( P0, V1h, @@ -192,10 +192,10 @@ def __init__( # Create tmps for later use in evaluating on the grid grid_shape = tuple([len(loc_grid) for loc_grid in interpolation_grid]) - self._vf_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._gvf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._gvf2_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._gvf3_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._vf_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._gvf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._gvf2_values = [np.zeros(grid_shape, dtype=float) 
for i in range(3)] + self._gvf3_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] # gradient of the component of the vector field grad = derham.grad_bcfree @@ -264,27 +264,19 @@ def dot(self, v, out=None): self.gv3f.vector = grad_3_v vf_values = self.vf.eval_tp_fixed_loc( - self.interpolation_grid_spans, - [self.interpolation_grid_bn] * 3, - out=self._vf_values, + self.interpolation_grid_spans, [self.interpolation_grid_bn] * 3, out=self._vf_values ) gvf1_values = self.gv1f.eval_tp_fixed_loc( - self.interpolation_grid_spans, - self.interpolation_grid_gradient, - out=self._gvf1_values, + self.interpolation_grid_spans, self.interpolation_grid_gradient, out=self._gvf1_values ) gvf2_values = self.gv2f.eval_tp_fixed_loc( - self.interpolation_grid_spans, - self.interpolation_grid_gradient, - out=self._gvf2_values, + self.interpolation_grid_spans, self.interpolation_grid_gradient, out=self._gvf2_values ) gvf3_values = self.gv3f.eval_tp_fixed_loc( - self.interpolation_grid_spans, - self.interpolation_grid_gradient, - out=self._gvf3_values, + self.interpolation_grid_spans, self.interpolation_grid_gradient, out=self._gvf3_values ) self.PiuT.update_weights([[vf_values[0], vf_values[1], vf_values[2]]]) @@ -386,13 +378,13 @@ def __init__(self, derham, transposed=False, weights=None): ) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_0]) - self._f_0_values = xp.zeros(grid_shape, dtype=float) + self._f_0_values = np.zeros(grid_shape, dtype=float) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_1]) - self._f_1_values = xp.zeros(grid_shape, dtype=float) + self._f_1_values = np.zeros(grid_shape, dtype=float) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_2]) - self._f_2_values = xp.zeros(grid_shape, dtype=float) + self._f_2_values = np.zeros(grid_shape, dtype=float) @property def domain(self): @@ -541,7 +533,7 @@ def __init__(self, derham, transposed=False, weights=None): ) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_0]) - self._bf0_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._bf0_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] self.hist_grid_0_b = [ [self.hist_grid_0_bn[0], self.hist_grid_0_bd[1], self.hist_grid_0_bd[2]], [ @@ -552,7 +544,7 @@ def __init__(self, derham, transposed=False, weights=None): [self.hist_grid_0_bd[0], self.hist_grid_0_bd[1], self.hist_grid_0_bn[2]], ] grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_1]) - self._bf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._bf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] self.hist_grid_1_b = [ [self.hist_grid_1_bn[0], self.hist_grid_1_bd[1], self.hist_grid_1_bd[2]], [ @@ -564,7 +556,7 @@ def __init__(self, derham, transposed=False, weights=None): ] grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_2]) - self._bf2_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._bf2_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] self.hist_grid_2_b = [ [self.hist_grid_2_bn[0], self.hist_grid_2_bd[1], self.hist_grid_2_bd[2]], [ @@ -735,8 +727,8 @@ def __init__(self, derham, phys_domain, Uv, gamma, transposed=False, weights1=No self._proj_p_metric = deepcopy(metric) grid_shape = tuple([len(loc_grid) for loc_grid in int_grid]) - self._pf_values = xp.zeros(grid_shape, dtype=float) - self._mapped_pf_values = xp.zeros(grid_shape, dtype=float) + self._pf_values = np.zeros(grid_shape, dtype=float) + self._mapped_pf_values = np.zeros(grid_shape, dtype=float) # 
gradient of the component of the vector field @@ -757,13 +749,13 @@ def __init__(self, derham, phys_domain, Uv, gamma, transposed=False, weights1=No ) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_20]) - self._pf_0_values = xp.zeros(grid_shape, dtype=float) + self._pf_0_values = np.zeros(grid_shape, dtype=float) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_21]) - self._pf_1_values = xp.zeros(grid_shape, dtype=float) + self._pf_1_values = np.zeros(grid_shape, dtype=float) grid_shape = tuple([len(loc_grid) for loc_grid in hist_grid_22]) - self._pf_2_values = xp.zeros(grid_shape, dtype=float) + self._pf_2_values = np.zeros(grid_shape, dtype=float) @property def domain(self): @@ -885,21 +877,21 @@ def __init__(self, derham, gamma): self.rhof1 = self._derham.create_spline_function("rhof1", "L2") grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._rhof_values = xp.zeros(grid_shape, dtype=float) - self._rhof1_values = xp.zeros(grid_shape, dtype=float) - self._sf_values = xp.zeros(grid_shape, dtype=float) - self._sf1_values = xp.zeros(grid_shape, dtype=float) - self._delta_values = xp.zeros(grid_shape, dtype=float) - self._rhof_mid_values = xp.zeros(grid_shape, dtype=float) - self._sf_mid_values = xp.zeros(grid_shape, dtype=float) - self._eta_values = xp.zeros(grid_shape, dtype=float) - self._en_values = xp.zeros(grid_shape, dtype=float) - self._en1_values = xp.zeros(grid_shape, dtype=float) - self._de_values = xp.zeros(grid_shape, dtype=float) - self._d2e_values = xp.zeros(grid_shape, dtype=float) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) - self._tmp_int_grid2 = xp.zeros(grid_shape, dtype=float) - self._DG_values = xp.zeros(grid_shape, dtype=float) + self._rhof_values = np.zeros(grid_shape, dtype=float) + self._rhof1_values = np.zeros(grid_shape, dtype=float) + self._sf_values = np.zeros(grid_shape, dtype=float) + self._sf1_values = np.zeros(grid_shape, dtype=float) + self._delta_values = np.zeros(grid_shape, dtype=float) + self._rhof_mid_values = np.zeros(grid_shape, dtype=float) + self._sf_mid_values = np.zeros(grid_shape, dtype=float) + self._eta_values = np.zeros(grid_shape, dtype=float) + self._en_values = np.zeros(grid_shape, dtype=float) + self._en1_values = np.zeros(grid_shape, dtype=float) + self._de_values = np.zeros(grid_shape, dtype=float) + self._d2e_values = np.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) + self._tmp_int_grid2 = np.zeros(grid_shape, dtype=float) + self._DG_values = np.zeros(grid_shape, dtype=float) def ener(self, rho, s, out=None): r"""Thermodynamical energy as a function of rho and s, using the perfect gas hypothesis.
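The closed-form derivatives in the hunks below all follow from differentiating e(rho, s) = rho^gamma * exp(s / rho), e.g. de/drho = (gamma * rho^(gamma - 1) - s * rho^(gamma - 2)) * exp(s / rho). A quick standalone finite-difference check of that formula (plain numpy, not struphy code):

import numpy as np

gam = 5.0 / 3.0

def ener(rho, s):
    return rho**gam * np.exp(s / rho)

def dener_drho(rho, s):
    return (gam * rho ** (gam - 1) - s * rho ** (gam - 2)) * np.exp(s / rho)

rho, s, h = 1.3, 0.4, 1e-6
fd = (ener(rho + h, s) - ener(rho - h, s)) / (2 * h)  # central difference, O(h^2)
assert abs(fd - dener_drho(rho, s)) < 1e-6 * abs(fd)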
@@ -909,13 +901,13 @@ def ener(self, rho, s, out=None): """ gam = self._gamma if out is None: - out = xp.power(rho, gam) * xp.exp(s / rho) + out = np.power(rho, gam) * np.exp(s / rho) else: out *= 0.0 out += s out /= rho - xp.exp(out, out=out) - xp.power(rho, gam, out=self._tmp_int_grid) + np.exp(out, out=out) + np.power(rho, gam, out=self._tmp_int_grid) out *= self._tmp_int_grid return out @@ -927,17 +919,17 @@ def dener_drho(self, rho, s, out=None): """ gam = self._gamma if out is None: - out = (gam * xp.power(rho, gam - 1) - s * xp.power(rho, gam - 2)) * xp.exp(s / rho) + out = (gam * np.power(rho, gam - 1) - s * np.power(rho, gam - 2)) * np.exp(s / rho) else: out *= 0.0 out += s out /= rho - xp.exp(out, out=out) + np.exp(out, out=out) - xp.power(rho, gam - 1, out=self._tmp_int_grid) + np.power(rho, gam - 1, out=self._tmp_int_grid) self._tmp_int_grid *= gam - xp.power(rho, gam - 2, out=self._tmp_int_grid2) + np.power(rho, gam - 2, out=self._tmp_int_grid2) self._tmp_int_grid2 *= s self._tmp_int_grid -= self._tmp_int_grid2 @@ -952,13 +944,13 @@ def dener_ds(self, rho, s, out=None): """ gam = self._gamma if out is None: - out = xp.power(rho, gam - 1) * xp.exp(s / rho) + out = np.power(rho, gam - 1) * np.exp(s / rho) else: out *= 0.0 out += s out /= rho - xp.exp(out, out=out) - xp.power(rho, gam - 1, out=self._tmp_int_grid) + np.exp(out, out=out) + np.power(rho, gam - 1, out=self._tmp_int_grid) out *= self._tmp_int_grid return out @@ -971,25 +963,25 @@ def d2ener_drho2(self, rho, s, out=None): gam = self._gamma if out is None: out = ( - gam * (gam - 1) * xp.power(rho, gam - 2) - - s * 2 * (gam - 1) * xp.power(rho, gam - 3) - + s**2 * xp.power(rho, gam - 4) - ) * xp.exp(s / rho) + gam * (gam - 1) * np.power(rho, gam - 2) + - s * 2 * (gam - 1) * np.power(rho, gam - 3) + + s**2 * np.power(rho, gam - 4) + ) * np.exp(s / rho) else: out *= 0.0 out += s out /= rho - xp.exp(out, out=out) + np.exp(out, out=out) - xp.power(rho, gam - 2, out=self._tmp_int_grid) + np.power(rho, gam - 2, out=self._tmp_int_grid) self._tmp_int_grid *= gam * (gam - 1) - xp.power(rho, gam - 3, out=self._tmp_int_grid2) + np.power(rho, gam - 3, out=self._tmp_int_grid2) self._tmp_int_grid2 *= s self._tmp_int_grid2 *= 2 * (gam - 1) self._tmp_int_grid -= self._tmp_int_grid2 - xp.power(rho, gam - 4, out=self._tmp_int_grid2) + np.power(rho, gam - 4, out=self._tmp_int_grid2) self._tmp_int_grid2 *= s self._tmp_int_grid2 *= s self._tmp_int_grid += self._tmp_int_grid2 @@ -1004,27 +996,27 @@ def d2ener_ds2(self, rho, s, out=None): """ gam = self._gamma if out is None: - out = xp.power(rho, gam - 2) * xp.exp(s / rho) + out = np.power(rho, gam - 2) * np.exp(s / rho) else: out *= 0.0 out += s out /= rho - xp.exp(out, out=out) - xp.power(rho, gam - 2, out=self._tmp_int_grid) + np.exp(out, out=out) + np.power(rho, gam - 2, out=self._tmp_int_grid) out *= self._tmp_int_grid return out def eta(self, delta_x, out=None): r"""Switch function :math:`\eta(\delta) = 1 - \exp(-(\delta/10^{-5})^2)`.""" if out is None: - out = 1.0 - xp.exp(-((delta_x / 1e-5) ** 2)) + out = 1.0 - np.exp(-((delta_x / 1e-5) ** 2)) else: out *= 0.0 out += delta_x out /= 1e-5 out **= 2 out *= -1 - xp.exp(out, out=out) + np.exp(out, out=out) out *= -1 out += 1.0 return out @@ -1336,7 +1328,7 @@ def __init__(self, derham, mass_ops, domain): ) grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._f_values = xp.zeros(grid_shape, dtype=float) + self._f_values = np.zeros(grid_shape, dtype=float) metric = domain.metric(*integration_grid)
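A quick cross-check of the perfect-gas closure implemented above: `ener` computes e(rho, s) = rho**gamma * exp(s / rho), and `dener_drho` its closed-form rho-derivative. The following minimal sketch (plain NumPy, with free-function stand-ins for the class methods and an assumed gamma; not part of the diff itself) verifies the derivative against a central finite difference:

import numpy as np

gamma = 5.0 / 3.0  # assumed adiabatic exponent for the check

def ener(rho, s):
    # perfect-gas energy e(rho, s) = rho^gamma * exp(s / rho)
    return np.power(rho, gamma) * np.exp(s / rho)

def dener_drho(rho, s):
    # closed form: (gamma * rho^(gamma-1) - s * rho^(gamma-2)) * exp(s / rho)
    return (gamma * np.power(rho, gamma - 1) - s * np.power(rho, gamma - 2)) * np.exp(s / rho)

rho, s, h = 1.2, 0.3, 1e-6
fd = (ener(rho + h, s) - ener(rho - h, s)) / (2.0 * h)  # central difference
assert np.isclose(fd, dener_drho(rho, s), rtol=1e-5)

The same pattern checks `dener_ds` and the second derivatives `d2ener_drho2`/`d2ener_ds2`.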
self._mass_metric_term = deepcopy(metric) @@ -1391,12 +1383,7 @@ def update_weight(self, coeffs): self._pc.update_mass_operator(self._massop) def _create_inv( - self, - type="pcg", - pc_type="MassMatrixDiagonalPreconditioner", - tol=1e-16, - maxiter=500, - verbose=False, + self, type="pcg", pc_type="MassMatrixDiagonalPreconditioner", tol=1e-16, maxiter=500, verbose=False ): """Invert the weighted mass matrix""" if pc_type is None: @@ -1450,10 +1437,10 @@ def __init__(self, derham, domain, mass_ops): self.uf = derham.create_spline_function("uf", "H1vec") self.uf1 = derham.create_spline_function("uf1", "H1vec") - self._uf_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._uf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._Guf_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) + self._uf_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._uf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._Guf_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) metric = domain.metric( *integration_grid, diff --git a/src/struphy/fields_background/base.py b/src/struphy/fields_background/base.py index 7ad6e3887..275fb13f9 100644 --- a/src/struphy/fields_background/base.py +++ b/src/struphy/fields_background/base.py @@ -2,11 +2,11 @@ from abc import ABCMeta, abstractmethod -import cunumpy as xp from matplotlib import pyplot as plt from pyevtk.hl import gridToVTK from struphy.geometry.base import Domain +from struphy.utils.arrays import xp as np class FluidEquilibrium(metaclass=ABCMeta): @@ -47,7 +47,7 @@ def domain(self): @domain.setter def domain(self, new_domain): - assert isinstance(new_domain, Domain) or new_domain is None + assert isinstance(new_domain, Domain) self._domain = new_domain ########################### @@ -140,7 +140,7 @@ def t3(self, *etas, squeeze_out=False): def vth0(self, *etas, squeeze_out=False): """0-form thermal velocity on logical cube [0, 1]^3.""" - return xp.sqrt(self.t0(*etas, squeeze_out=squeeze_out)) + return np.sqrt(self.t0(*etas, squeeze_out=squeeze_out)) def vth3(self, *etas, squeeze_out=False): """3-form thermal velocity on logical cube [0, 1]^3.""" @@ -156,7 +156,7 @@ def q0(self, *etas, squeeze_out=False): """0-form square root of the pressure on logical cube [0, 1]^3.""" # xyz = self.domain(*etas, squeeze_out=False) p = self.p0(*etas) - q = xp.sqrt(p) + q = np.sqrt(p) return self.domain.pull(q, *etas, kind="0", squeeze_out=squeeze_out) def q3(self, *etas, squeeze_out=False): @@ -176,7 +176,7 @@ def s0_monoatomic(self, *etas, squeeze_out=False): # xyz = self.domain(*etas, squeeze_out=False) p = self.p0(*etas) n = self.n0(*etas) - s = n * xp.log(p / (2 / 3 * xp.power(n, 5 / 3))) + s = n * np.log(p / (2 / 3 * np.power(n, 5 / 3))) return self.domain.pull(s, *etas, kind="0", squeeze_out=squeeze_out) def s3_monoatomic(self, *etas, squeeze_out=False): @@ -198,7 +198,7 @@ def s0_diatomic(self, *etas, squeeze_out=False): # xyz = self.domain(*etas, squeeze_out=False) p = self.p0(*etas) n = self.n0(*etas) - s = n * xp.log(p / (2 / 5 * xp.power(n, 7 / 5))) + s = n * np.log(p / (2 / 5 * np.power(n, 7 / 5))) return self.domain.pull(s, *etas, kind="0", squeeze_out=squeeze_out) def s3_diatomic(self, *etas, squeeze_out=False): @@ -395,7 +395,7 @@ def unit_b_cart(self, *etas, squeeze_out=False): """Unit vector Cartesian components of magnetic field evaluated on logical cube
[0, 1]^3. Returns also (x,y,z).""" b, xyz = self.b_cart(*etas, squeeze_out=squeeze_out) absB = self.absB0(*etas, squeeze_out=squeeze_out) - out = xp.array([b[0] / absB, b[1] / absB, b[2] / absB], dtype=float) + out = np.array([b[0] / absB, b[1] / absB, b[2] / absB], dtype=float) return out, xyz def gradB1(self, *etas, squeeze_out=False): @@ -481,7 +481,7 @@ def av(self, *etas, squeeze_out=False): def absB0(self, *etas, squeeze_out=False): """0-form absolute value of magnetic field on logical cube [0, 1]^3.""" b, xyz = self.b_cart(*etas, squeeze_out=squeeze_out) - return xp.sqrt(b[0] ** 2 + b[1] ** 2 + b[2] ** 2) + return np.sqrt(b[0] ** 2 + b[1] ** 2 + b[2] ** 2) def absB3(self, *etas, squeeze_out=False): """3-form absolute value of magnetic field on logical cube [0, 1]^3.""" @@ -783,28 +783,19 @@ def u_cart(self, *etas, squeeze_out=False): def curl_unit_b1(self, *etas, squeeze_out=False): """1-form components of curl of unit magnetic field evaluated on logical cube [0, 1]^3. Returns also (x,y,z).""" return self.domain.pull( - self.curl_unit_b_cart(*etas, squeeze_out=False)[0], - *etas, - kind="1", - squeeze_out=squeeze_out, + self.curl_unit_b_cart(*etas, squeeze_out=False)[0], *etas, kind="1", squeeze_out=squeeze_out ) def curl_unit_b2(self, *etas, squeeze_out=False): """2-form components of curl of unit magnetic field evaluated on logical cube [0, 1]^3. Returns also (x,y,z).""" return self.domain.pull( - self.curl_unit_b_cart(*etas, squeeze_out=False)[0], - *etas, - kind="2", - squeeze_out=squeeze_out, + self.curl_unit_b_cart(*etas, squeeze_out=False)[0], *etas, kind="2", squeeze_out=squeeze_out ) def curl_unit_bv(self, *etas, squeeze_out=False): """Contra-variant components of curl of unit magnetic field evaluated on logical cube [0, 1]^3. Returns also (x,y,z).""" return self.domain.pull( - self.curl_unit_b_cart(*etas, squeeze_out=False)[0], - *etas, - kind="v", - squeeze_out=squeeze_out, + self.curl_unit_b_cart(*etas, squeeze_out=False)[0], *etas, kind="v", squeeze_out=squeeze_out ) def curl_unit_b_cart(self, *etas, squeeze_out=False): @@ -813,7 +804,7 @@ def curl_unit_b_cart(self, *etas, squeeze_out=False): j, xyz = self.j_cart(*etas, squeeze_out=squeeze_out) gradB, xyz = self.gradB_cart(*etas, squeeze_out=squeeze_out) absB = self.absB0(*etas, squeeze_out=squeeze_out) - out = xp.array( + out = np.array( [ j[0] / absB + (b[1] * gradB[2] - b[2] * gradB[1]) / absB**2, j[1] / absB + (b[2] * gradB[0] - b[0] * gradB[2]) / absB**2, @@ -917,9 +908,9 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): "HollowTorus", ) - e1 = xp.linspace(0.0001, 1, n1) - e2 = xp.linspace(0, 1, n2) - e3 = xp.linspace(0, 1, n3) + e1 = np.linspace(0.0001, 1, n1) + e2 = np.linspace(0, 1, n2) + e3 = np.linspace(0, 1, n3) if self.domain.__class__.__name__ in ("GVECunit", "DESCunit"): if n_planes > 1: @@ -944,7 +935,7 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): print("Computation of abs(B) done.") j_cart, xyz = self.j_cart(e1, e2, e3) print("Computation of current density done.") - absJ = xp.sqrt(j_cart[0] ** 2 + j_cart[1] ** 2 + j_cart[2] ** 2) + absJ = np.sqrt(j_cart[0] ** 2 + j_cart[1] ** 2 + j_cart[2] ** 2) _path = struphy.__path__[0] + "/fields_background/mhd_equil/gvec/output/" gridToVTK( @@ -967,24 +958,24 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): print(key, ": ", val) # poloidal plane grid - fig = plt.figure(figsize=(13, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(13, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, 
int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: - pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) for i in range(pc1.shape[0]): for j in range(pc1.shape[1] - 1): if i < pc1.shape[0] - 1: @@ -1013,26 +1004,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): ) # top view - e1 = xp.linspace(0, 1, n1) # radial coordinate in [0, 1] - e2 = xp.linspace(0, 1, 3) # poloidal angle in [0, 1] - e3 = xp.linspace(0, 1, n3) # toroidal angle in [0, 1] + e1 = np.linspace(0, 1, n1) # radial coordinate in [0, 1] + e2 = np.linspace(0, 1, 3) # poloidal angle in [0, 1] + e3 = np.linspace(0, 1, n3) # toroidal angle in [0, 1] xt, yt, zt = self.domain(e1, e2, e3) fig = plt.figure(figsize=(13, 2 * 6.5)) ax = fig.add_subplot() for m in range(2): - xpp = xt[:, m, :].squeeze() + xp = xt[:, m, :].squeeze() yp = yt[:, m, :].squeeze() zp = zt[:, m, :].squeeze() if self.domain.__class__.__name__ in torus_mappings: - tc1 = xpp + tc1 = xp tc2 = yp l1 = "x" l2 = "y" else: - tc1 = xpp + tc1 = xp tc2 = zp l1 = "x" l2 = "z" @@ -1067,26 +1058,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): ax.set_title("Device top view") # Jacobian determinant - fig = plt.figure(figsize=(13, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(13, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: - pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" detp = det_df[:, :, int(n * jump)].squeeze() - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) map = ax.contourf(pc1, pc2, detp, 30) ax.set_xlabel(l1) ax.set_ylabel(l2) @@ -1097,26 +1088,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): fig.colorbar(map, ax=ax, location="right") # pressure - fig = plt.figure(figsize=(15, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(15, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: - pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" pp = p[:, :, int(n * jump)].squeeze() - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) map = ax.contourf(pc1, pc2, pp, 30) ax.set_xlabel(l1) ax.set_ylabel(l2) @@ -1127,26 +1118,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): fig.colorbar(map, ax=ax, location="right") # density - fig = plt.figure(figsize=(15, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(15, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: 
- pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" nn = n_dens[:, :, int(n * jump)].squeeze() - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) map = ax.contourf(pc1, pc2, nn, 30) ax.set_xlabel(l1) ax.set_ylabel(l2) @@ -1157,26 +1148,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): fig.colorbar(map, ax=ax, location="right") # magnetic field strength - fig = plt.figure(figsize=(15, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(15, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: - pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" ab = absB[:, :, int(n * jump)].squeeze() - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) map = ax.contourf(pc1, pc2, ab, 30) ax.set_xlabel(l1) ax.set_ylabel(l2) @@ -1187,26 +1178,26 @@ def show(self, n1=16, n2=33, n3=21, n_planes=5): fig.colorbar(map, ax=ax, location="right") # current density - fig = plt.figure(figsize=(15, xp.ceil(n_planes / 2) * 6.5)) + fig = plt.figure(figsize=(15, np.ceil(n_planes / 2) * 6.5)) for n in range(n_planes): - xpp = x[:, :, int(n * jump)].squeeze() + xp = x[:, :, int(n * jump)].squeeze() yp = y[:, :, int(n * jump)].squeeze() zp = z[:, :, int(n * jump)].squeeze() if self.domain.__class__.__name__ in torus_mappings: - pc1 = xp.sqrt(xpp**2 + yp**2) + pc1 = np.sqrt(xp**2 + yp**2) pc2 = zp l1 = "R" l2 = "Z" else: - pc1 = xpp + pc1 = xp pc2 = yp l1 = "x" l2 = "y" ab = absJ[:, :, int(n * jump)].squeeze() - ax = fig.add_subplot(int(xp.ceil(n_planes / 2)), 2, n + 1) + ax = fig.add_subplot(int(np.ceil(n_planes / 2)), 2, n + 1) map = ax.contourf(pc1, pc2, ab, 30) ax.set_xlabel(l1) ax.set_ylabel(l2) @@ -1316,8 +1307,8 @@ def b_xyz(self, x, y, z): BZ = self.psi(R, Z, dR=1) / R # push-forward to Cartesian components - Bx = BR * xp.cos(Phi) - BP * xp.sin(Phi) - By = BR * xp.sin(Phi) + BP * xp.cos(Phi) + Bx = BR * np.cos(Phi) - BP * np.sin(Phi) + By = BR * np.sin(Phi) + BP * np.cos(Phi) Bz = 1 * BZ return Bx, By, Bz @@ -1333,8 +1324,8 @@ def j_xyz(self, x, y, z): jZ = self.g_tor(R, Z, dR=1) / R # push-forward to Cartesian components - jx = jR * xp.cos(Phi) - jP * xp.sin(Phi) - jy = jR * xp.sin(Phi) + jP * xp.cos(Phi) + jx = jR * np.cos(Phi) - jP * np.sin(Phi) + jy = jR * np.sin(Phi) + jP * np.cos(Phi) jz = 1 * jZ return jx, jy, jz @@ -1344,7 +1335,7 @@ def gradB_xyz(self, x, y, z): R, Phi, Z = self.inverse_map(x, y, z) - RabsB = xp.sqrt( + RabsB = np.sqrt( self.psi(R, Z, dZ=1) ** 2 + self.g_tor(R, Z) ** 2 + self.psi(R, Z, dR=1) ** 2, ) @@ -1372,8 +1363,8 @@ def gradB_xyz(self, x, y, z): ) # push-forward to Cartesian components - gradBx = gradBR * xp.cos(Phi) - gradBP * xp.sin(Phi) - gradBy = gradBR * xp.sin(Phi) + gradBP * xp.cos(Phi) + gradBx = gradBR * np.cos(Phi) - gradBP * np.sin(Phi) + gradBy = gradBR * np.sin(Phi) + gradBP * np.cos(Phi) gradBz = 1 * gradBZ return gradBx, gradBy, gradBz @@ -1382,8 +1373,8 @@ def gradB_xyz(self, x, y, z): def inverse_map(x, y, z): """Inverse cylindrical mapping.""" - R = xp.sqrt(x**2 + y**2) - P = xp.arctan2(y, x) + R = np.sqrt(x**2 + y**2) + P = 
np.arctan2(y, x) Z = 1 * z return R, P, Z diff --git a/src/struphy/fields_background/coil_fields/base.py b/src/struphy/fields_background/coil_fields/base.py index 331e89e7d..540268ef3 100644 --- a/src/struphy/fields_background/coil_fields/base.py +++ b/src/struphy/fields_background/coil_fields/base.py @@ -1,9 +1,10 @@ from abc import ABCMeta, abstractmethod -import cunumpy as xp from matplotlib import pyplot as plt from pyevtk.hl import gridToVTK +from struphy.utils.arrays import xp as np + class CoilMagneticField(metaclass=ABCMeta): """ @@ -31,7 +32,7 @@ def b_xyz(self, x, y, z): def absB0(self, *etas, squeeze_out=False): """0-form absolute value of equilibrium magnetic field on logical cube [0, 1]^3.""" b, xyz = self.b_cart(*etas, squeeze_out=squeeze_out) - return xp.sqrt(b[0] ** 2 + b[1] ** 2 + b[2] ** 2) + return np.sqrt(b[0] ** 2 + b[1] ** 2 + b[2] ** 2) def absB3(self, *etas, squeeze_out=False): """3-form absolute value of equilibrium magnetic field on logical cube [0, 1]^3.""" @@ -71,11 +72,7 @@ def bv(self, *etas, squeeze_out=False): def b_cart(self, *etas, squeeze_out=False): """Cartesian components of equilibrium magnetic field evaluated on logical cube [0, 1]^3. Returns also (x,y,z).""" b_out = self.domain.push( - self.bv(*etas, squeeze_out=False), - *etas, - kind="v", - a_kwargs={"squeeze_out": False}, - squeeze_out=squeeze_out, + self.bv(*etas, squeeze_out=False), *etas, kind="v", a_kwargs={"squeeze_out": False}, squeeze_out=squeeze_out ) return b_out, self.domain(*etas, squeeze_out=squeeze_out) @@ -95,7 +92,7 @@ def unit_b_cart(self, *etas, squeeze_out=False): """Unit vector Cartesian components of equilibrium magnetic field evaluated on logical cube [0, 1]^3. Returns also (x,y,z).""" b, xyz = self.b_cart(*etas, squeeze_out=squeeze_out) absB = self.absB0(*etas, squeeze_out=squeeze_out) - out = xp.array([b[0] / absB, b[1] / absB, b[2] / absB], dtype=float) + out = np.array([b[0] / absB, b[1] / absB, b[2] / absB], dtype=float) return out, xyz diff --git a/src/struphy/fields_background/coil_fields/coil_fields.py b/src/struphy/fields_background/coil_fields/coil_fields.py index 1b5c66a15..75895c465 100644 --- a/src/struphy/fields_background/coil_fields/coil_fields.py +++ b/src/struphy/fields_background/coil_fields/coil_fields.py @@ -1,7 +1,6 @@ -import cunumpy as xp - from struphy.feec.psydac_derham import Derham from struphy.fields_background.coil_fields.base import CoilMagneticField, load_csv_data +from struphy.utils.arrays import xp as np class RatGUI(CoilMagneticField): @@ -15,9 +14,7 @@ def __init__(self, csv_path=None, Nel=[16, 16, 16], p=[3, 3, 3], domain=None, ** self._ratgui_csv_data = load_csv_data(csv_path) derham = Derham( - Nel=Nel, - p=p, - spl_kind=[False, False, True], + Nel=Nel, p=p, spl_kind=[False, False, True] ) # Assuming (R=eta1, Z=eta2, phi=eta3) coordinates for csv data (periodic in eta3 only). 
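Both the `inverse_map` helper above and the coil-field `b_xyz` below convert Cartesian positions to cylindrical coordinates before evaluating fields. A tiny self-contained round-trip check of that conversion (plain NumPy sketch, not part of the diff); note that the two-argument `np.arctan2(y, x)` keeps the quadrant information that a plain `arctan(y / x)` would lose:

import numpy as np

def inverse_map(x, y, z):
    # inverse cylindrical mapping: (x, y, z) -> (R, phi, Z)
    return np.sqrt(x**2 + y**2), np.arctan2(y, x), z

x, y, z = -1.0, 2.0, 0.5  # second quadrant, where y / x alone is ambiguous
R, phi, Z = inverse_map(x, y, z)
# forward map: x = R*cos(phi), y = R*sin(phi), z = Z
assert np.allclose((R * np.cos(phi), R * np.sin(phi), Z), (x, y, z))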
self._interpolate = derham.P[ "v" @@ -36,14 +33,14 @@ def __init__(self, csv_path=None, Nel=[16, 16, 16], p=[3, 3, 3], domain=None, ** self.rhs[1][:] = B_Z self.rhs[2][:] = B_phi - print(f"{self.rhs =}") - print(f"{derham.nbasis['v'] =}") - print(f"{self.rhs[0] =}") - print(f"{self.rhs[1] =}") - print(f"{self.rhs[2] =}") - print(f"{self.rhs[0][:].shape =}") - print(f"{self.rhs[1][:].shape =}") - print(f"{self.rhs[2][:].shape =}") + print(f"{self.rhs = }") + print(f"{derham.nbasis['v'] = }") + print(f"{self.rhs[0] = }") + print(f"{self.rhs[1] = }") + print(f"{self.rhs[2] = }") + print(f"{self.rhs[0][:].shape = }") + print(f"{self.rhs[1][:].shape = }") + print(f"{self.rhs[2][:].shape = }") # We need to choose Nel and p such that the csv_data fits into this vector. # For a periodic direction, the size of the vector is Nel, for non-periodic (spl_kind=False) the size is Nel + p. # See the Tutorial on FEEC data structures https://struphy.pages.mpcdf.de/struphy/tutorials/tutorial_06_data_structures.html#FEEC-data-structures on how to address such a vector @@ -82,8 +79,8 @@ def bfield_RZphi(self): def b_xyz(self, x, y, z): """Cartesian coil magnetic field in physical space. Must return the components as a tuple.""" # compute (R, Z, phi) coordinates from (x, y, z), for example: - R = xp.sqrt(x**2 + y**2) + R = np.sqrt(x**2 + y**2) Z = z - phi = -xp.arctan2(y / x) + phi = -np.arctan2(y, x) return self.bfield_RZphi(R, Z, phi) diff --git a/src/struphy/fields_background/equils.py b/src/struphy/fields_background/equils.py index f1afacd35..71488ce7f 100644 --- a/src/struphy/fields_background/equils.py +++ b/src/struphy/fields_background/equils.py @@ -7,7 +7,6 @@ import warnings from time import time -import cunumpy as xp from scipy.integrate import odeint, quad from scipy.interpolate import RectBivariateSpline, UnivariateSpline from scipy.optimize import fsolve, minimize @@ -29,6 +28,7 @@ NumericalMHDequilibrium, ) from struphy.fields_background.mhd_equil.eqdsk import readeqdsk +from struphy.utils.arrays import xp as np from struphy.utils.utils import read_state, subp_run @@ -178,8 +178,7 @@ class ShearedSlab(CartesianMHDequilibrium): Ion number density at x=a (default: 1.). beta : float Plasma beta (ratio of kinematic pressure to B^2/2, default: 0.1). - q_kind : int - Kind of safety factor profile, (0 or 1, default: 0). + Note ---- In the parameter .yml, use the following in the section ``fluid_background``:: @@ -194,7 +193,6 @@ class ShearedSlab(CartesianMHDequilibrium): n2 : 0. # 2nd shape factor for ion number density profile na : 1. # number density at r=a beta : .1 # plasma beta = p*2/B^2 - q_kind : 0.
# kind of safety factor profile """ def __init__( @@ -208,7 +206,6 @@ def __init__( n2: float = 0.0, na: float = 1.0, beta: float = 0.1, - q_kind: int = 0, ): # use params setter self.params = copy.deepcopy(locals()) @@ -229,19 +226,10 @@ def q_x(self, x, der=0): qout = 0 * x else: - if self.params["q_kind"] == 0: - if der == 0: - qout = self.params["q0"] + (self.params["q1"] - self.params["q0"]) * (x / self.params["a"]) ** 2 - else: - qout = 2 * (self.params["q1"] - self.params["q0"]) * x / self.params["a"] ** 2 - + if der == 0: + qout = self.params["q0"] + (self.params["q1"] - self.params["q0"]) * (x / self.params["a"]) ** 2 else: - if der == 0: - qout = self.params["q0"] + self.params["q1"] * xp.sin(2.0 * xp.pi * x / self.params["a"]) - else: - qout = ( - 2.0 * xp.pi / self.params["a"] * self.params["q1"] * xp.cos(2.0 * xp.pi * x / self.params["a"]) - ) + qout = 2 * (self.params["q1"] - self.params["q0"]) * x / self.params["a"] ** 2 return qout @@ -251,7 +239,7 @@ def p_x(self, x): eps = self.params["a"] / self.params["R0"] - if xp.all(q >= 100.0): + if np.all(q >= 100.0): pout = self.params["B0"] ** 2 * self.params["beta"] / 2.0 - 0 * x else: pout = self.params["B0"] ** 2 * self.params["beta"] / 2.0 * (1 + eps**2 / q**2) + self.params[ @@ -273,7 +261,7 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - x = xp.linspace(0.0, self.params["a"], n_pts) + x = np.linspace(0.0, self.params["a"], n_pts) fig, ax = plt.subplots(1, 3) @@ -307,7 +295,7 @@ def b_xyz(self, x, y, z): q = self.q_x(x) eps = self.params["a"] / self.params["R0"] - if xp.all(q >= 100.0): + if np.all(q >= 100.0): by = 0 * x bz = self.params["B0"] - 0 * x else: @@ -324,7 +312,7 @@ def j_xyz(self, x, y, z): q = self.q_x(x) eps = self.params["a"] / self.params["R0"] - if xp.all(q >= 100.0): + if np.all(q >= 100.0): jz = 0 * x else: jz = -self.params["B0"] * eps * self.q_x(x, der=1) / q**2 @@ -353,13 +341,13 @@ def gradB_xyz(self, x, y, z): q = self.q_x(x) eps = self.params["a"] / self.params["R0"] - if xp.all(q >= 100.0): + if np.all(q >= 100.0): gradBx = 0 * x else: gradBx = ( -self.params["B0"] * eps**2 - / xp.sqrt(1 + eps**2 / self.q_x(x) ** 2) + / np.sqrt(1 + eps**2 / self.q_x(x) ** 2) * self.q_x(x, der=1) / self.q_x(x) ** 3 ) @@ -457,8 +445,8 @@ def __init__( def T_z(self, z): r"""Swap function T(z) = [\tanh((z - z_1)/\delta) - \tanh((z - z_2)/\delta)] / 2""" Tout = ( - xp.tanh((z - self.params["z1"]) / self.params["delta"]) - - xp.tanh((z - self.params["z2"]) / self.params["delta"]) + np.tanh((z - self.params["z1"]) / self.params["delta"]) + - np.tanh((z - self.params["z2"]) / self.params["delta"]) ) / 2.0 return Tout @@ -480,7 +468,7 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - z = xp.linspace(0.0, self.params["c"], n_pts) + z = np.linspace(0.0, self.params["c"], n_pts) fig, ax = plt.subplots(1, 3) @@ -647,8 +635,8 @@ def __init__( self.params = copy.deepcopy(locals()) # inverse cylindrical coordinate transformation (x, y, z) --> (r, theta, phi) - self.r = lambda x, y, z: xp.sqrt(x**2 + y**2) - self.theta = lambda x, y, z: xp.arctan2(y, x) + self.r = lambda x, y, z: np.sqrt(x**2 + y**2) + self.theta = lambda x, y, z: np.arctan2(y, x) self.z = lambda x, y, z: 1 * z # =============================================================== @@ -707,7 +695,7 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - r = xp.linspace(0.0, self.params["a"], n_pts) + r = np.linspace(0.0, self.params["a"], n_pts) fig, ax = plt.subplots(1, 3) @@ -718,7 +706,7 @@ def 
plot_profiles(self, n_pts=501): ax[0].set_xlabel("r") ax[0].set_ylabel("q") - ax[0].plot(r, xp.ones(r.size), "k--") + ax[0].plot(r, np.ones(r.size), "k--") ax[1].plot(r, self.p_r(r)) ax[1].set_xlabel("r") @@ -743,13 +731,13 @@ def b_xyz(self, x, y, z): theta = self.theta(x, y, z) q = self.q_r(r) # azimuthal component - if xp.all(q >= 100.0): + if np.all(q >= 100.0): b_theta = 0 * r else: b_theta = self.params["B0"] * r / (self.params["R0"] * q) # cartesian x-component - bx = -b_theta * xp.sin(theta) - by = b_theta * xp.cos(theta) + bx = -b_theta * np.sin(theta) + by = b_theta * np.cos(theta) bz = self.params["B0"] - 0 * x return bx, by, bz @@ -763,7 +751,7 @@ def j_xyz(self, x, y, z): r = self.r(x, y, z) q = self.q_r(r) q_p = self.q_r(r, der=1) - if xp.all(q >= 100.0): + if np.all(q >= 100.0): jz = 0 * x else: jz = self.params["B0"] / (self.params["R0"] * q**2) * (2 * q - r * q_p) @@ -790,13 +778,13 @@ def gradB_xyz(self, x, y, z): r = self.r(x, y, z) theta = self.theta(x, y, z) q = self.q_r(r) - if xp.all(q >= 100.0): + if np.all(q >= 100.0): gradBr = 0 * x else: gradBr = ( self.params["B0"] / self.params["R0"] ** 2 - / xp.sqrt( + / np.sqrt( 1 + r**2 / self.q_r( @@ -807,8 +795,8 @@ def gradB_xyz(self, x, y, z): ) * (r / self.q_r(r) ** 2 - r**2 / self.q_r(r) ** 3 * self.q_r(r, der=1)) ) - gradBx = gradBr * xp.cos(theta) - gradBy = gradBr * xp.sin(theta) + gradBx = gradBr * np.cos(theta) + gradBy = gradBr * np.sin(theta) gradBz = 0 * x return gradBx, gradBy, gradBz @@ -947,10 +935,10 @@ def __init__( self.params = copy.deepcopy(locals()) # plasma boundary contour - ths = xp.linspace(0.0, 2 * xp.pi, 201) + ths = np.linspace(0.0, 2 * np.pi, 201) - self._rbs = self.params["R0"] * (1 + self.params["a"] / self.params["R0"] * xp.cos(ths)) - self._zbs = self.params["a"] * xp.sin(ths) + self._rbs = self.params["R0"] * (1 + self.params["a"] / self.params["R0"] * np.cos(ths)) + self._zbs = self.params["a"] * np.sin(ths) # set on-axis and boundary fluxes if self.params["q_kind"] == 0: @@ -961,12 +949,12 @@ def __init__( self._p_i = None else: - r_i = xp.linspace(0.0, self.params["a"], self.params["psi_nel"] + 1) + r_i = np.linspace(0.0, self.params["a"], self.params["psi_nel"] + 1) def dpsi_dr(r): - return self.params["B0"] * r / (self.q_r(r) * xp.sqrt(1 - r**2 / self.params["R0"] ** 2)) + return self.params["B0"] * r / (self.q_r(r) * np.sqrt(1 - r**2 / self.params["R0"] ** 2)) - psis = xp.zeros_like(r_i) + psis = np.zeros_like(r_i) for i, rr in enumerate(r_i): psis[i] = quad(dpsi_dr, 0.0, rr)[0] @@ -989,7 +977,7 @@ def dp_dr(r): * (2 * self.q_r(r) - r * self.q_r(r, der=1)) ) - ps = xp.zeros_like(r_i) + ps = np.zeros_like(r_i) for i, rr in enumerate(r_i): ps[i] = quad(dp_dr, 0.0, rr)[0] @@ -1044,7 +1032,7 @@ def psi_r(self, r, der=0): dq = q1 - q0 # geometric correction factor and its first derivative - gf_0 = xp.sqrt(1 - (r / self.params["R0"]) ** 2) + gf_0 = np.sqrt(1 - (r / self.params["R0"]) ** 2) gf_1 = -r / (self.params["R0"] ** 2 * gf_0) # safety factors @@ -1055,9 +1043,9 @@ def psi_r(self, r, der=0): q_bar_1 = q_1 * gf_0 + q_0 * gf_1 if der == 0: - out = -self.params["B0"] * self.params["a"] ** 2 / xp.sqrt(dq * q0 * eps**2 + dq**2) - out *= xp.arctanh( - xp.sqrt((dq - dq * (r / self.params["R0"]) ** 2) / (q0 * eps**2 + dq)), + out = -self.params["B0"] * self.params["a"] ** 2 / np.sqrt(dq * q0 * eps**2 + dq**2) + out *= np.arctanh( + np.sqrt((dq - dq * (r / self.params["R0"]) ** 2) / (q0 * eps**2 + dq)), ) elif der == 1: out = self.params["B0"] * r / q_bar_0 @@ -1127,10 +1115,10 @@ def 
q_r(self, r, der=0): r_flat = r.flatten() - r_zeros = xp.where(r_flat == 0.0)[0] - r_nzero = xp.where(r_flat != 0.0)[0] + r_zeros = np.where(r_flat == 0.0)[0] + r_nzero = np.where(r_flat != 0.0)[0] - qout = xp.zeros(r_flat.size, dtype=float) + qout = np.zeros(r_flat.size, dtype=float) if der == 0: if self.params["q0"] == self.params["q1"]: @@ -1223,7 +1211,7 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - r = xp.linspace(0.0, self.params["a"], n_pts) + r = np.linspace(0.0, self.params["a"], n_pts) fig, ax = plt.subplots(2, 2) @@ -1257,7 +1245,7 @@ def plot_profiles(self, n_pts=501): def psi(self, R, Z, dR=0, dZ=0): """Poloidal flux function psi = psi(R, Z).""" - r = xp.sqrt(Z**2 + (R - self.params["R0"]) ** 2) + r = np.sqrt(Z**2 + (R - self.params["R0"]) ** 2) if dR == 0 and dZ == 0: out = self.psi_r(r, der=0) @@ -1305,7 +1293,7 @@ def g_tor(self, R, Z, dR=0, dZ=0): def p_xyz(self, x, y, z): """Pressure p = p(x, y, z).""" - r = xp.sqrt((xp.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) + r = np.sqrt((np.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) pp = self.p_r(r) @@ -1313,7 +1301,7 @@ def p_xyz(self, x, y, z): def n_xyz(self, x, y, z): """Number density n = n(x, y, z).""" - r = xp.sqrt((xp.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) + r = np.sqrt((np.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) nn = self.n_r(r) @@ -1441,10 +1429,10 @@ def __init__( self.params = copy.deepcopy(locals()) # plasma boundary contour - ths = xp.linspace(0.0, 2 * xp.pi, 201) + ths = np.linspace(0.0, 2 * np.pi, 201) - self._rbs = self.params["R0"] * (1 + self.params["a"] / self.params["R0"] * xp.cos(ths)) - self._zbs = self.params["a"] * xp.sin(ths) + self._rbs = self.params["R0"] * (1 + self.params["a"] / self.params["R0"] * np.cos(ths)) + self._zbs = self.params["a"] * np.sin(ths) # on-axis flux (arbitrary value) self._psi0 = -10.0 @@ -1465,12 +1453,12 @@ def dpsi_dr(psi, r, psi1): q = q0 + psi_norm * (q1 - q0 + (q1p - q1 + q0) * (1 - psi_s) * (psi_norm - 1) / (psi_norm - psi_s)) - out = B0 * r / (q * xp.sqrt(1 - r**2 / R0**2)) + out = B0 * r / (q * np.sqrt(1 - r**2 / R0**2)) return out # solve differential equation and fix boundary flux - r_i = xp.linspace(0.0, self.params["a"], self.params["psi_nel"] + 1) + r_i = np.linspace(0.0, self.params["a"], self.params["psi_nel"] + 1) def fun(psi1): out = odeint(dpsi_dr, self._psi0, r_i, args=(psi1,)).flatten() @@ -1557,13 +1545,13 @@ def p_psi(self, psi, der=0): psi_norm = (psi - self._psi0) / (self._psi1 - self._psi0) if der == 0: - out = self.params["beta"] * self.params["B0"] ** 2 / 2.0 * xp.exp(-psi_norm / p1) + out = self.params["beta"] * self.params["B0"] ** 2 / 2.0 * np.exp(-psi_norm / p1) else: out = ( -self.params["beta"] * self.params["B0"] ** 2 / 2.0 - * xp.exp(-psi_norm / p1) + * np.exp(-psi_norm / p1) / (p1 * (self._psi1 - self._psi0)) ) @@ -1592,8 +1580,8 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - r = xp.linspace(0.0, self.params["a"], n_pts) - psi = xp.linspace(self._psi0, self._psi1, n_pts) + r = np.linspace(0.0, self.params["a"], n_pts) + psi = np.linspace(self._psi0, self._psi1, n_pts) fig, ax = plt.subplots(2, 2) @@ -1627,7 +1615,7 @@ def plot_profiles(self, n_pts=501): def psi(self, R, Z, dR=0, dZ=0): """Poloidal flux function psi = psi(R, Z).""" - r = xp.sqrt(Z**2 + (R - self.params["R0"]) ** 2) + r = np.sqrt(Z**2 + (R - self.params["R0"]) ** 2) if dR == 0 and dZ == 0: out = self.psi_r(r, der=0) @@ -1671,13 +1659,13 @@ def g_tor(self, R, Z, dR=0, dZ=0): def 
p_xyz(self, x, y, z): """Pressure p = p(x, y, z).""" - r = xp.sqrt((xp.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) + r = np.sqrt((np.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) return self.p_psi(self.psi_r(r)) def n_xyz(self, x, y, z): """Number density n = n(x, y, z).""" - r = xp.sqrt((xp.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) + r = np.sqrt((np.sqrt(x**2 + y**2) - self._params["R0"]) ** 2 + z**2) return self.n_psi(self.psi_r(r)) @@ -1759,7 +1747,7 @@ def __init__( units["p"] = 1.0 units["n"] = 1e20 warnings.warn( - f"{units =}, no rescaling performed in EQDSK output.", + f"{units = }, no rescaling performed in EQDSK output.", ) self._units = units @@ -1815,8 +1803,8 @@ def __init__( self._r_range = [rleft, rleft + rdim] self._z_range = [zmid - zdim / 2, zmid + zdim / 2] - R = xp.linspace(self._r_range[0], self._r_range[1], nR) - Z = xp.linspace(self._z_range[0], self._z_range[1], nZ) + R = np.linspace(self._r_range[0], self._r_range[1], nR) + Z = np.linspace(self._z_range[0], self._z_range[1], nZ) smooth_steps = [ int(1 / (self.params["psi_resolution"][0] * 0.01)), int(1 / (self.params["psi_resolution"][1] * 0.01)), ] @@ -1846,7 +1834,7 @@ def __init__( self._psi1 = psi_edge # interpolate toroidal field function, pressure profile and q-profile on uniform flux grid from axis to boundary - flux_grid = xp.linspace(self._psi0, self._psi1, g_profile.size) + flux_grid = np.linspace(self._psi0, self._psi1, g_profile.size) smooth_step = int(1 / (self.params["flux_resolution"] * 0.01)) @@ -2018,7 +2006,7 @@ def g_tor(self, R, Z, dR=0, dZ=0): def p_xyz(self, x, y, z): """Pressure p = p(x, y, z) in units 1 Tesla^2/mu_0.""" - R = xp.sqrt(x**2 + y**2) + R = np.sqrt(x**2 + y**2) Z = 1 * z out = self.p_psi(self.psi(R, Z)) @@ -2031,7 +2019,7 @@ def p_xyz(self, x, y, z): def n_xyz(self, x, y, z): """Number density in physical space. 
Units from parameter file.""" - R = xp.sqrt(x**2 + y**2) + R = np.sqrt(x**2 + y**2) Z = 1 * z out = self.n_psi(self.psi(R, Z)) @@ -2133,7 +2121,7 @@ def __init__( with pytest.raises(SystemExit) as exc: print("Simulation aborted, gvec must be installed (pip install gvec)!") sys.exit(1) - print(f"{exc.value.code =}") + print(f"{exc.value.code = }") import gvec @@ -2148,7 +2136,7 @@ def __init__( units["p"] = 1.0 units["n"] = 1e20 warnings.warn( - f"{units =}, no rescaling performed in GVEC output.", + f"{units = }, no rescaling performed in GVEC output.", ) self._units = units @@ -2211,9 +2199,9 @@ def bv(self, *etas, squeeze_out=False): bt += "_B" bz += "_B" self.state.compute(ev, bt, bz) - bv_2 = getattr(ev, bt).data / (2 * xp.pi) - bv_3 = getattr(ev, bz).data / (2 * xp.pi) * self._nfp - out = (xp.zeros_like(bv_2), bv_2, bv_3) + bv_2 = getattr(ev, bt).data / (2 * np.pi) + bv_3 = getattr(ev, bz).data / (2 * np.pi) * self._nfp + out = (np.zeros_like(bv_2), bv_2, bv_3) # apply struphy units for o in out: @@ -2231,8 +2219,8 @@ def jv(self, *etas, squeeze_out=False): self.state.compute(ev, jr, jt, jz) rmin = self._params["rmin"] jv_1 = ev.J_contra_r.data / (1.0 - rmin) - jv_2 = ev.J_contra_t.data / (2 * xp.pi) - jv_3 = ev.J_contra_z.data / (2 * xp.pi) * self._nfp + jv_2 = ev.J_contra_t.data / (2 * np.pi) + jv_3 = ev.J_contra_z.data / (2 * np.pi) * self._nfp if self.params["use_boozer"]: warnings.warn("GVEC current density in Boozer coords not yet implemented, set to zero.") # jr += "_B" @@ -2257,11 +2245,11 @@ def p0(self, *etas, squeeze_out=False): if not flat_eval: eta2 = etas[1] eta3 = etas[2] - if isinstance(eta2, xp.ndarray): + if isinstance(eta2, np.ndarray): if eta2.ndim == 3: eta2 = eta2[0, :, 0] eta3 = eta3[0, 0, :] - tmp, _1, _2 = xp.meshgrid(ev.p.data, eta2, eta3, indexing="ij") + tmp, _1, _2 = np.meshgrid(ev.p.data, eta2, eta3, indexing="ij") else: tmp = ev.p.data @@ -2321,7 +2309,7 @@ def _gvec_evaluations(self, *etas): etas = list(etas) for i, eta in enumerate(etas): if isinstance(eta, (float, int)): - etas[i] = xp.array((eta,)) + etas[i] = np.array((eta,)) assert etas[0].ndim == etas[1].ndim == etas[2].ndim if etas[0].ndim == 1: eta1 = etas[0] @@ -2338,8 +2326,8 @@ def _gvec_evaluations(self, *etas): # gvec coordinates rho = rmin + eta1 * (1.0 - rmin) - theta = 2 * xp.pi * eta2 - zeta = 2 * xp.pi * eta3 + theta = 2 * np.pi * eta2 + zeta = 2 * np.pi * eta3 # evaluate if self.params["use_boozer"]: @@ -2428,7 +2416,7 @@ def __init__( units["p"] = 1.0 units["n"] = 1e20 warnings.warn( - f"{units =}, no rescaling performed in DESC output.", + f"{units = }, no rescaling performed in DESC output.", ) self._units = units @@ -2509,7 +2497,7 @@ def bv(self, *etas, squeeze_out=False): li = [] for gi, ei in zip(grid, etas): if gi.shape == ei.shape: - li += [xp.allclose(gi, ei)] + li += [np.allclose(gi, ei)] else: li += [False] if all(li): @@ -2561,9 +2549,9 @@ def _eval_bv(self, *etas, squeeze_out=False): if var == "B^rho": tmp /= 1.0 - self.rmin elif var == "B^theta": - tmp /= 2.0 * xp.pi + tmp /= 2.0 * np.pi elif var == "B^zeta": - tmp /= 2.0 * xp.pi / nfp + tmp /= 2.0 * np.pi / nfp # adjust for Struphy units tmp /= self.units["B"] / self.units["x"] out += [tmp] @@ -2582,7 +2570,7 @@ def jv(self, *etas, squeeze_out=False): li = [] for gi, ei in zip(grid, etas): if gi.shape == ei.shape: - li += [xp.allclose(gi, ei)] + li += [np.allclose(gi, ei)] else: li += [False] if all(li): @@ -2634,9 +2622,9 @@ def _eval_jv(self, *etas, squeeze_out=False): if var == "J^rho": tmp /= 1.0 - self.rmin elif 
var == "J^theta": - tmp /= 2.0 * xp.pi + tmp /= 2.0 * np.pi elif var == "J^zeta": - tmp /= 2.0 * xp.pi / nfp + tmp /= 2.0 * np.pi / nfp # adjust for Struphy units tmp /= self.units["j"] / self.units["x"] out += [tmp] @@ -2706,7 +2694,7 @@ def gradB1(self, *etas, squeeze_out=False): li = [] for gi, ei in zip(grid, etas): if gi.shape == ei.shape: - li += [xp.allclose(gi, ei)] + li += [np.allclose(gi, ei)] else: li += [False] if all(li): @@ -2757,9 +2745,9 @@ def _eval_gradB1(self, *etas, squeeze_out=False): if var == "|B|_r": tmp *= 1.0 - self.rmin elif var == "|B|_t": - tmp *= 2.0 * xp.pi + tmp *= 2.0 * np.pi elif var == "|B|_z": - tmp *= 2.0 * xp.pi / nfp + tmp *= 2.0 * np.pi / nfp # adjust for Struphy units tmp /= self.units["B"] out += [tmp] @@ -2769,9 +2757,9 @@ def _eval_gradB1(self, *etas, squeeze_out=False): def desc_eval( self, var: str, - e1: xp.ndarray, - e2: xp.ndarray, - e3: xp.ndarray, + e1: np.ndarray, + e2: np.ndarray, + e3: np.ndarray, flat_eval: bool = False, nfp: int = 1, verbose: bool = False, @@ -2785,7 +2773,7 @@ def desc_eval( Desc equilibrium quantitiy to evaluate, from `https://desc-docs.readthedocs.io/en/latest/variables.html#list-of-variables`_. - e1, e2, e3 : xp.ndarray + e1, e2, e3 : np.ndarray Input grids, either 1d or 3d. flat_eval : bool @@ -2804,21 +2792,21 @@ def desc_eval( warnings.filterwarnings("ignore") ttime = time() # Fix issue 353 with float dummy etas - e1 = xp.array([e1]) if isinstance(e1, float) else e1 - e2 = xp.array([e2]) if isinstance(e2, float) else e2 - e3 = xp.array([e3]) if isinstance(e3, float) else e3 + e1 = np.array([e1]) if isinstance(e1, float) else e1 + e2 = np.array([e2]) if isinstance(e2, float) else e2 + e3 = np.array([e3]) if isinstance(e3, float) else e3 # transform input grids if e1.ndim == 3: assert e1.shape == e2.shape == e3.shape rho = self.rmin + e1[:, 0, 0] * (1.0 - self.rmin) - theta = 2 * xp.pi * e2[0, :, 0] - zeta = 2 * xp.pi * e3[0, 0, :] / nfp + theta = 2 * np.pi * e2[0, :, 0] + zeta = 2 * np.pi * e3[0, 0, :] / nfp else: assert e1.ndim == e2.ndim == e3.ndim == 1 rho = self.rmin + e1 * (1.0 - self.rmin) - theta = 2 * xp.pi * e2 - zeta = 2 * xp.pi * e3 / nfp + theta = 2 * np.pi * e2 + zeta = 2 * np.pi * e3 / nfp # eval type if flat_eval: @@ -2827,13 +2815,13 @@ def desc_eval( t = theta z = zeta else: - r, t, z = xp.meshgrid(rho, theta, zeta, indexing="ij") + r, t, z = np.meshgrid(rho, theta, zeta, indexing="ij") r = r.flatten() t = t.flatten() z = z.flatten() - nodes = xp.stack((r, t, z)).T - grid_3d = Grid(nodes, spacing=xp.ones_like(nodes), jitable=False) + nodes = np.stack((r, t, z)).T + grid_3d = Grid(nodes, spacing=np.ones_like(nodes), jitable=False) # compute output corresponding to the generated desc grid node_values = self.eq.compute( @@ -2874,32 +2862,32 @@ def desc_eval( )[0, 0, :] # make sure the desc grid is correct - assert xp.all(rho == rho1) - assert xp.all(theta == theta1) - assert xp.all(zeta == zeta1) + assert np.all(rho == rho1) + assert np.all(theta == theta1) + assert np.all(zeta == zeta1) if verbose: # import sys - print(f"\n{nfp =}") - print(f"{self.eq.axis =}") - print(f"{rho.size =}") - print(f"{theta.size =}") - print(f"{zeta.size =}") - print(f"{grid_3d.num_rho =}") - print(f"{grid_3d.num_theta =}") - print(f"{grid_3d.num_zeta =}") + print(f"\n{nfp = }") + print(f"{self.eq.axis = }") + print(f"{rho.size = }") + print(f"{theta.size = }") + print(f"{zeta.size = }") + print(f"{grid_3d.num_rho = }") + print(f"{grid_3d.num_theta = }") + print(f"{grid_3d.num_zeta = }") # print(f'\n{grid_3d.nodes[:, 
0] = }') # print(f'\n{grid_3d.nodes[:, 1] = }') # print(f'\n{grid_3d.nodes[:, 2] = }') - print(f"\n{rho =}") - print(f"{rho1 =}") - print(f"\n{theta =}") - print(f"{theta1 =}") - print(f"\n{zeta =}") - print(f"{zeta1 =}") + print(f"\n{rho = }") + print(f"{rho1 = }") + print(f"\n{theta = }") + print(f"{theta1 = }") + print(f"\n{zeta = }") + print(f"{zeta1 = }") # make c-contiguous - out = xp.ascontiguousarray(out) + out = np.ascontiguousarray(out) print(f"desc_eval for {var}: {time() - ttime} seconds") return out @@ -2917,7 +2905,7 @@ def __init__( uz: float = 0.0, n: float = 1.0, n1: float = 0.0, - density_profile: str = "constant", + density_profile: str = "affine", p0: float = 1.0, ): # use params setter @@ -2947,12 +2935,12 @@ def n_xyz(self, x, y, z): elif self.params["density_profile"] == "affine": return self.params["n"] + self.params["n1"] * x elif self.params["density_profile"] == "gaussian_xy": - return self.params["n"] * xp.exp(-(x**2 + y**2) / self.params["p0"]) + return self.params["n"] * np.exp(-(x**2 + y**2) / self.params["p0"]) elif self.params["density_profile"] == "step_function_x": out = 1e-8 + 0 * x - # mask_x = xp.logical_and(x < .6, x > .4) - # mask_y = xp.logical_and(y < .6, y > .4) - # mask = xp.logical_and(mask_x, mask_y) + # mask_x = np.logical_and(x < .6, x > .4) + # mask_y = np.logical_and(y < .6, y > .4) + # mask = np.logical_and(mask_x, mask_y) mask = x < -2.0 out[mask] = self.params["n"] return out @@ -3278,7 +3266,7 @@ def plot_profiles(self, n_pts=501): import matplotlib.pyplot as plt - r = xp.linspace(0.0, self.params["a"], n_pts) + r = np.linspace(0.0, self.params["a"], n_pts) fig, ax = plt.subplots(1, 3) @@ -3289,7 +3277,7 @@ def plot_profiles(self, n_pts=501): ax[0].set_xlabel("r") ax[0].set_ylabel("q") - ax[0].plot(r, xp.ones(r.size), "k--") + ax[0].plot(r, np.ones(r.size), "k--") ax[1].plot(r, self.p_r(r)) ax[1].set_xlabel("r") @@ -3312,8 +3300,8 @@ def b_xyz(self, x, y, z): """Magnetic field.""" bz = 0 * x - by = xp.tanh(z / self._params["delta"]) - bx = xp.sqrt(1 - by**2) + by = np.tanh(z / self._params["delta"]) + bx = np.sqrt(1 - by**2) bxs = self._params["amp"] * bx bys = self._params["amp"] * by diff --git a/src/struphy/fields_background/generic.py b/src/struphy/fields_background/generic.py index 0b82d7e17..f309f1eef 100644 --- a/src/struphy/fields_background/generic.py +++ b/src/struphy/fields_background/generic.py @@ -1,5 +1,3 @@ -import copy - from struphy.fields_background.base import ( CartesianFluidEquilibrium, CartesianFluidEquilibriumWithB, @@ -19,9 +17,6 @@ def __init__( p_xyz: callable = None, n_xyz: callable = None, ): - # use params setter - self.params = copy.deepcopy(locals()) - if u_xyz is None: u_xyz = lambda x, y, z: (0.0 * x, 0.0 * x, 0.0 * x) else: @@ -62,9 +57,6 @@ def __init__( b_xyz: callable = None, gradB_xyz: callable = None, ): - # use params setter - self.params = copy.deepcopy(locals()) - super().__init__(u_xyz=u_xyz, p_xyz=p_xyz, n_xyz=n_xyz) if b_xyz is None: diff --git a/src/struphy/fields_background/mhd_equil/eqdsk/readeqdsk.py b/src/struphy/fields_background/mhd_equil/eqdsk/readeqdsk.py index 812381e87..67578ce17 100644 --- a/src/struphy/fields_background/mhd_equil/eqdsk/readeqdsk.py +++ b/src/struphy/fields_background/mhd_equil/eqdsk/readeqdsk.py @@ -173,11 +173,7 @@ def main(): action="store_true", ) parser.add_option( - "-v", - "--vars", - dest="vars", - help="comma separated list of variables (use '-v \"*\"' for all)", - default="*", + "-v", "--vars", dest="vars", help="comma separated list of variables (use 
'-v \"*\"' for all)", default="*" ) parser.add_option( "-p", diff --git a/src/struphy/fields_background/projected_equils.py b/src/struphy/fields_background/projected_equils.py index 26fa4f9c8..afd3d15fa 100644 --- a/src/struphy/fields_background/projected_equils.py +++ b/src/struphy/fields_background/projected_equils.py @@ -1,6 +1,3 @@ -from psydac.linalg.block import BlockVector -from psydac.linalg.stencil import StencilVector - from struphy.feec.psydac_derham import Derham from struphy.fields_background.base import ( FluidEquilibrium, @@ -103,7 +100,7 @@ def absB3(self): return coeffs @property - def p3(self) -> StencilVector: + def p3(self): tmp = self._P3(self.equil.p3) coeffs = self._E3T.dot(tmp) coeffs.update_ghost_regions() @@ -259,7 +256,7 @@ def a1(self): # 2-forms # # ---------# @property - def b2(self) -> BlockVector: + def b2(self): tmp = self._P2([self.equil.b2_1, self.equil.b2_2, self.equil.b2_3]) coeffs = self._E2T.dot(tmp) coeffs.update_ghost_regions() diff --git a/src/struphy/fields_background/tests/test_desc_equil.py b/src/struphy/fields_background/tests/test_desc_equil.py index c7130f0a3..c8f1dc6fe 100644 --- a/src/struphy/fields_background/tests/test_desc_equil.py +++ b/src/struphy/fields_background/tests/test_desc_equil.py @@ -1,9 +1,10 @@ import importlib.util -import cunumpy as xp import pytest from matplotlib import pyplot as plt +from struphy.utils.arrays import xp as np + desc_spec = importlib.util.find_spec("desc") @@ -32,9 +33,9 @@ def test_desc_equil(do_plot=False): n2 = 9 n3 = 11 - e1 = xp.linspace(0.0001, 1, n1) - e2 = xp.linspace(0, 1, n2) - e3 = xp.linspace(0, 1 - 1e-6, n3) + e1 = np.linspace(0.0001, 1, n1) + e2 = np.linspace(0, 1, n2) + e3 = np.linspace(0, 1 - 1e-6, n3) # desc grid and evaluation vars = [ @@ -69,43 +70,43 @@ def test_desc_equil(do_plot=False): outs[nfp] = {} rho = rmin + e1 * (1.0 - rmin) - theta = 2 * xp.pi * e2 - zeta = 2 * xp.pi * e3 / nfp + theta = 2 * np.pi * e2 + zeta = 2 * np.pi * e3 / nfp - r, t, ze = xp.meshgrid(rho, theta, zeta, indexing="ij") + r, t, ze = np.meshgrid(rho, theta, zeta, indexing="ij") r = r.flatten() t = t.flatten() ze = ze.flatten() - nodes = xp.stack((r, t, ze)).T - grid_3d = Grid(nodes, spacing=xp.ones_like(nodes), jitable=False) + nodes = np.stack((r, t, ze)).T + grid_3d = Grid(nodes, spacing=np.ones_like(nodes), jitable=False) for var in vars: node_values = desc_eq.compute(var, grid=grid_3d, override_grid=False) if node_values[var].ndim == 1: out = node_values[var].reshape((rho.size, theta.size, zeta.size), order="C") - outs[nfp][var] = xp.ascontiguousarray(out) + outs[nfp][var] = np.ascontiguousarray(out) else: B = [] for i in range(3): Bcomp = node_values[var][:, i].reshape((rho.size, theta.size, zeta.size), order="C") - Bcomp = xp.ascontiguousarray(Bcomp) + Bcomp = np.ascontiguousarray(Bcomp) B += [Bcomp] outs[nfp][var + str(i + 1)] = Bcomp - outs[nfp][var] = xp.sqrt(B[0] ** 2 + B[1] ** 2 + B[2] ** 2) + outs[nfp][var] = np.sqrt(B[0] ** 2 + B[1] ** 2 + B[2] ** 2) - assert xp.allclose(outs[nfp]["B1"], outs[nfp]["B_R"]) - assert xp.allclose(outs[nfp]["B2"], outs[nfp]["B_phi"]) - assert xp.allclose(outs[nfp]["B3"], outs[nfp]["B_Z"]) + assert np.allclose(outs[nfp]["B1"], outs[nfp]["B_R"]) + assert np.allclose(outs[nfp]["B2"], outs[nfp]["B_phi"]) + assert np.allclose(outs[nfp]["B3"], outs[nfp]["B_Z"]) - assert xp.allclose(outs[nfp]["J1"], outs[nfp]["J_R"]) - assert xp.allclose(outs[nfp]["J2"], outs[nfp]["J_phi"]) - assert xp.allclose(outs[nfp]["J3"], outs[nfp]["J_Z"]) + assert np.allclose(outs[nfp]["J1"], 
outs[nfp]["J_R"]) + assert np.allclose(outs[nfp]["J2"], outs[nfp]["J_phi"]) + assert np.allclose(outs[nfp]["J3"], outs[nfp]["J_Z"]) - outs[nfp]["Bx"] = xp.cos(outs[nfp]["phi"]) * outs[nfp]["B_R"] - xp.sin(outs[nfp]["phi"]) * outs[nfp]["B_phi"] + outs[nfp]["Bx"] = np.cos(outs[nfp]["phi"]) * outs[nfp]["B_R"] - np.sin(outs[nfp]["phi"]) * outs[nfp]["B_phi"] - outs[nfp]["By"] = xp.sin(outs[nfp]["phi"]) * outs[nfp]["B_R"] + xp.cos(outs[nfp]["phi"]) * outs[nfp]["B_phi"] + outs[nfp]["By"] = np.sin(outs[nfp]["phi"]) * outs[nfp]["B_R"] + np.cos(outs[nfp]["phi"]) * outs[nfp]["B_phi"] outs[nfp]["Bz"] = outs[nfp]["B_Z"] @@ -122,32 +123,32 @@ def test_desc_equil(do_plot=False): outs_struphy[nfp]["Y"] = y outs_struphy[nfp]["Z"] = z - outs_struphy[nfp]["R"] = xp.sqrt(x**2 + y**2) - tmp = xp.arctan2(y, x) - tmp[tmp < -1e-6] += 2 * xp.pi + outs_struphy[nfp]["R"] = np.sqrt(x**2 + y**2) + tmp = np.arctan2(y, x) + tmp[tmp < -1e-6] += 2 * np.pi outs_struphy[nfp]["phi"] = tmp - outs_struphy[nfp]["sqrt(g)"] = s_eq.domain.jacobian_det(e1, e2, e3) / (4 * xp.pi**2 / nfp) + outs_struphy[nfp]["sqrt(g)"] = s_eq.domain.jacobian_det(e1, e2, e3) / (4 * np.pi**2 / nfp) outs_struphy[nfp]["p"] = s_eq.p0(e1, e2, e3) # include push forward to DESC logical coordinates bv = s_eq.bv(e1, e2, e3) outs_struphy[nfp]["B^rho"] = bv[0] * (1 - rmin) - outs_struphy[nfp]["B^theta"] = bv[1] * 2 * xp.pi - outs_struphy[nfp]["B^zeta"] = bv[2] * 2 * xp.pi / nfp + outs_struphy[nfp]["B^theta"] = bv[1] * 2 * np.pi + outs_struphy[nfp]["B^zeta"] = bv[2] * 2 * np.pi / nfp outs_struphy[nfp]["B"] = s_eq.absB0(e1, e2, e3) # include push forward to DESC logical coordinates jv = s_eq.jv(e1, e2, e3) outs_struphy[nfp]["J^rho"] = jv[0] * (1 - rmin) - outs_struphy[nfp]["J^theta"] = jv[1] * 2 * xp.pi - outs_struphy[nfp]["J^zeta"] = jv[2] * 2 * xp.pi / nfp + outs_struphy[nfp]["J^theta"] = jv[1] * 2 * np.pi + outs_struphy[nfp]["J^zeta"] = jv[2] * 2 * np.pi / nfp j1 = s_eq.j1(e1, e2, e3) - outs_struphy[nfp]["J"] = xp.sqrt(jv[0] * j1[0] + jv[1] * j1[1] + jv[2] * j1[2]) + outs_struphy[nfp]["J"] = np.sqrt(jv[0] * j1[0] + jv[1] * j1[1] + jv[2] * j1[2]) b_cart, xyz = s_eq.b_cart(e1, e2, e3) outs_struphy[nfp]["Bx"] = b_cart[0] @@ -157,8 +158,8 @@ def test_desc_equil(do_plot=False): # include push forward to DESC logical coordinates gradB1 = s_eq.gradB1(e1, e2, e3) outs_struphy[nfp]["|B|_r"] = gradB1[0] / (1 - rmin) - outs_struphy[nfp]["|B|_t"] = gradB1[1] / (2 * xp.pi) - outs_struphy[nfp]["|B|_z"] = gradB1[2] / (2 * xp.pi / nfp) + outs_struphy[nfp]["|B|_t"] = gradB1[1] / (2 * np.pi) + outs_struphy[nfp]["|B|_z"] = gradB1[2] / (2 * np.pi / nfp) # comparisons vars += ["Bx", "By", "Bz"] @@ -167,25 +168,25 @@ def test_desc_equil(do_plot=False): err_lim = 0.09 for nfp in nfps: - print(f"\n{nfp =}") + print(f"\n{nfp = }") for var in vars: if var in ("B_R", "B_phi", "B_Z", "J_R", "J_phi", "J_Z"): continue else: - max_norm = xp.max(xp.abs(outs[nfp][var])) + max_norm = np.max(np.abs(outs[nfp][var])) if max_norm < 1e-16: max_norm = 1.0 - err = xp.max(xp.abs(outs[nfp][var] - outs_struphy[nfp][var])) / max_norm + err = np.max(np.abs(outs[nfp][var] - outs_struphy[nfp][var])) / max_norm assert err < err_lim print( - f"compare {var}: {err =}", + f"compare {var}: {err = }", ) if do_plot: fig = plt.figure(figsize=(12, 13)) - levels = xp.linspace(xp.min(outs[nfp][var]) - 1e-10, xp.max(outs[nfp][var]), 20) + levels = np.linspace(np.min(outs[nfp][var]) - 1e-10, np.max(outs[nfp][var]), 20) # poloidal plot R = outs[nfp]["R"][:, :, 0].squeeze() @@ -193,7 +194,7 @@ def 
test_desc_equil(do_plot=False): plt.subplot(2, 2, 1) map1 = plt.contourf(R, Z, outs[nfp][var][:, :, 0], levels=levels) - plt.title(f"DESC, {var =}, {nfp =}") + plt.title(f"DESC, {var = }, {nfp = }") plt.xlabel("$R$") plt.ylabel("$Z$") plt.axis("equal") @@ -201,7 +202,7 @@ def test_desc_equil(do_plot=False): plt.subplot(2, 2, 2) map2 = plt.contourf(R, Z, outs_struphy[nfp][var][:, :, 0], levels=levels) - plt.title(f"Struphy, {err =}") + plt.title(f"Struphy, {err = }") plt.xlabel("$R$") plt.ylabel("$Z$") plt.axis("equal") @@ -217,7 +218,7 @@ def test_desc_equil(do_plot=False): plt.subplot(2, 2, 3) map3 = plt.contourf(x1, y1, outs[nfp][var][:, 0, :], levels=levels) map3b = plt.contourf(x2, y2, outs[nfp][var][:, n2 // 2, :], levels=levels) - plt.title(f"DESC, {var =}, {nfp =}") + plt.title(f"DESC, {var = }, {nfp = }") plt.xlabel("$x$") plt.ylabel("$y$") plt.axis("equal") @@ -226,7 +227,7 @@ def test_desc_equil(do_plot=False): plt.subplot(2, 2, 4) map4 = plt.contourf(x1, y1, outs_struphy[nfp][var][:, 0, :], levels=levels) map4b = plt.contourf(x2, y2, outs_struphy[nfp][var][:, n2 // 2, :], levels=levels) - plt.title(f"Struphy, {err =}") + plt.title(f"Struphy, {err = }") plt.xlabel("$x$") plt.ylabel("$y$") plt.axis("equal") diff --git a/src/struphy/fields_background/tests/test_generic_equils.py b/src/struphy/fields_background/tests/test_generic_equils.py index 77ca8baaa..8c10eb80e 100644 --- a/src/struphy/fields_background/tests/test_generic_equils.py +++ b/src/struphy/fields_background/tests/test_generic_equils.py @@ -1,4 +1,3 @@ -import cunumpy as xp import pytest from matplotlib import pyplot as plt @@ -6,11 +5,12 @@ GenericCartesianFluidEquilibrium, GenericCartesianFluidEquilibriumWithB, ) +from struphy.utils.arrays import xp as np def test_generic_equils(show=False): - fun_vec = lambda x, y, z: (xp.cos(2 * xp.pi * x), xp.cos(2 * xp.pi * y), z) - fun_n = lambda x, y, z: xp.exp(-((x - 1) ** 2) - (y) ** 2) + fun_vec = lambda x, y, z: (np.cos(2 * np.pi * x), np.cos(2 * np.pi * y), z) + fun_n = lambda x, y, z: np.exp(-((x - 1) ** 2) - (y) ** 2) fun_p = lambda x, y, z: x**2 gen_eq = GenericCartesianFluidEquilibrium( u_xyz=fun_vec, @@ -25,22 +25,22 @@ def test_generic_equils(show=False): gradB_xyz=fun_vec, ) - x = xp.linspace(-3, 3, 32) - y = xp.linspace(-4, 4, 32) + x = np.linspace(-3, 3, 32) + y = np.linspace(-4, 4, 32) z = 1.0 - xx, yy, zz = xp.meshgrid(x, y, z) + xx, yy, zz = np.meshgrid(x, y, z) # gen_eq - assert all([xp.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq.u_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) - assert xp.all(gen_eq.p_xyz(xx, yy, zz) == fun_p(xx, yy, zz)) - assert xp.all(gen_eq.n_xyz(xx, yy, zz) == fun_n(xx, yy, zz)) + assert all([np.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq.u_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) + assert np.all(gen_eq.p_xyz(xx, yy, zz) == fun_p(xx, yy, zz)) + assert np.all(gen_eq.n_xyz(xx, yy, zz) == fun_n(xx, yy, zz)) # gen_eq_B - assert all([xp.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.u_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) - assert xp.all(gen_eq_B.p_xyz(xx, yy, zz) == fun_p(xx, yy, zz)) - assert xp.all(gen_eq_B.n_xyz(xx, yy, zz) == fun_n(xx, yy, zz)) - assert all([xp.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.b_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) - assert all([xp.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.gradB_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) + assert all([np.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.u_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) + assert np.all(gen_eq_B.p_xyz(xx, yy, zz) == fun_p(xx, yy, zz)) + 
assert np.all(gen_eq_B.n_xyz(xx, yy, zz) == fun_n(xx, yy, zz)) + assert all([np.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.b_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) + assert all([np.all(tmp == fun_i) for tmp, fun_i in zip(gen_eq_B.gradB_xyz(xx, yy, zz), fun_vec(xx, yy, zz))]) if show: plt.figure(figsize=(12, 12)) diff --git a/src/struphy/fields_background/tests/test_mhd_equils.py b/src/struphy/fields_background/tests/test_mhd_equils.py index f363ddbe3..bd750e9d9 100644 --- a/src/struphy/fields_background/tests/test_mhd_equils.py +++ b/src/struphy/fields_background/tests/test_mhd_equils.py @@ -1,7 +1,7 @@ -import cunumpy as xp import pytest from struphy.fields_background import equils +from struphy.utils.arrays import xp as np @pytest.mark.parametrize( @@ -9,44 +9,44 @@ [ ("HomogenSlab", {}, "Cuboid", {}), ("HomogenSlab", {}, "Colella", {"alpha": 0.06}), - ("ShearedSlab", {"a": 0.75, "R0": 3.5}, "Cuboid", {"r1": 0.75, "r2": 2 * xp.pi * 0.75, "r3": 2 * xp.pi * 3.5}), + ("ShearedSlab", {"a": 0.75, "R0": 3.5}, "Cuboid", {"r1": 0.75, "r2": 2 * np.pi * 0.75, "r3": 2 * np.pi * 3.5}), ( "ShearedSlab", {"a": 0.75, "R0": 3.5, "q0": "inf", "q1": "inf"}, "Cuboid", - {"r1": 0.75, "r2": 2 * xp.pi * 0.75, "r3": 2 * xp.pi * 3.5}, + {"r1": 0.75, "r2": 2 * np.pi * 0.75, "r3": 2 * np.pi * 3.5}, ), ( "ShearedSlab", {"a": 0.55, "R0": 4.5}, "Orthogonal", - {"Lx": 0.55, "Ly": 2 * xp.pi * 0.55, "Lz": 2 * xp.pi * 4.5}, + {"Lx": 0.55, "Ly": 2 * np.pi * 0.55, "Lz": 2 * np.pi * 4.5}, ), - ("ScrewPinch", {"a": 0.45, "R0": 2.5}, "HollowCylinder", {"a1": 0.05, "a2": 0.45, "Lz": 2 * xp.pi * 2.5}), - ("ScrewPinch", {"a": 1.45, "R0": 6.5}, "IGAPolarCylinder", {"a": 1.45, "Lz": 2 * xp.pi * 6.5}), + ("ScrewPinch", {"a": 0.45, "R0": 2.5}, "HollowCylinder", {"a1": 0.05, "a2": 0.45, "Lz": 2 * np.pi * 2.5}), + ("ScrewPinch", {"a": 1.45, "R0": 6.5}, "IGAPolarCylinder", {"a": 1.45, "Lz": 2 * np.pi * 6.5}), ( "ScrewPinch", {"a": 0.45, "R0": 2.5, "q0": 1.5, "q1": 1.5}, "HollowCylinder", - {"a1": 0.05, "a2": 0.45, "Lz": 2 * xp.pi * 2.5}, + {"a1": 0.05, "a2": 0.45, "Lz": 2 * np.pi * 2.5}, ), ( "ScrewPinch", {"a": 1.45, "R0": 6.5, "q0": 1.5, "q1": 1.5}, "IGAPolarCylinder", - {"a": 1.45, "Lz": 2 * xp.pi * 6.5}, + {"a": 1.45, "Lz": 2 * np.pi * 6.5}, ), ( "ScrewPinch", {"a": 0.45, "R0": 2.5, "q0": "inf", "q1": "inf"}, "HollowCylinder", - {"a1": 0.05, "a2": 0.45, "Lz": 2 * xp.pi * 2.5}, + {"a1": 0.05, "a2": 0.45, "Lz": 2 * np.pi * 2.5}, ), ( "ScrewPinch", {"a": 1.45, "R0": 6.5, "q0": "inf", "q1": "inf"}, "IGAPolarCylinder", - {"a": 1.45, "Lz": 2 * xp.pi * 6.5}, + {"a": 1.45, "Lz": 2 * np.pi * 6.5}, ), ( "AdhocTorus", @@ -136,28 +136,29 @@ def test_equils(equil_domain_pair): Test field evaluations of all implemented MHD equilibria with default parameters.
""" + from struphy.fields_background import equils from struphy.fields_background.base import CartesianMHDequilibrium, NumericalMHDequilibrium from struphy.geometry import domains # logical evalution point - pt = (xp.random.rand(), xp.random.rand(), xp.random.rand()) + pt = (np.random.rand(), np.random.rand(), np.random.rand()) # logical arrays: - e1 = xp.random.rand(4) - e2 = xp.random.rand(5) - e3 = xp.random.rand(6) + e1 = np.random.rand(4) + e2 = np.random.rand(5) + e3 = np.random.rand(6) # 2d slices - mat_12_1, mat_12_2 = xp.meshgrid(e1, e2, indexing="ij") - mat_13_1, mat_13_3 = xp.meshgrid(e1, e3, indexing="ij") - mat_23_2, mat_23_3 = xp.meshgrid(e2, e3, indexing="ij") + mat_12_1, mat_12_2 = np.meshgrid(e1, e2, indexing="ij") + mat_13_1, mat_13_3 = np.meshgrid(e1, e3, indexing="ij") + mat_23_2, mat_23_3 = np.meshgrid(e2, e3, indexing="ij") # 3d - mat_123_1, mat_123_2, mat_123_3 = xp.meshgrid(e1, e2, e3, indexing="ij") - mat_123_1_sp, mat_123_2_sp, mat_123_3_sp = xp.meshgrid(e1, e2, e3, indexing="ij", sparse=True) + mat_123_1, mat_123_2, mat_123_3 = np.meshgrid(e1, e2, e3, indexing="ij") + mat_123_1_sp, mat_123_2_sp, mat_123_3_sp = np.meshgrid(e1, e2, e3, indexing="ij", sparse=True) # markers - markers = xp.random.rand(33, 10) + markers = np.random.rand(33, 10) # create MHD equilibrium eq_mhd = getattr(equils, equil_domain_pair[0])(**equil_domain_pair[1]) @@ -273,8 +274,8 @@ def test_equils(equil_domain_pair): # --------- eta1 evaluation --------- results = [] - e2_pt = xp.random.rand() - e3_pt = xp.random.rand() + e2_pt = np.random.rand() + e3_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1, e2_pt, e3_pt, squeeze_out=True)) @@ -320,8 +321,8 @@ def test_equils(equil_domain_pair): # --------- eta2 evaluation --------- results = [] - e1_pt = xp.random.rand() - e3_pt = xp.random.rand() + e1_pt = np.random.rand() + e3_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1_pt, e2, e3_pt, squeeze_out=True)) @@ -369,8 +370,8 @@ def test_equils(equil_domain_pair): # --------- eta3 evaluation --------- results = [] - e1_pt = xp.random.rand() - e2_pt = xp.random.rand() + e1_pt = np.random.rand() + e2_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1_pt, e2_pt, e3, squeeze_out=True)) @@ -418,7 +419,7 @@ def test_equils(equil_domain_pair): # --------- eta1-eta2 evaluation --------- results = [] - e3_pt = xp.random.rand() + e3_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1, e2, e3_pt, squeeze_out=True)) @@ -466,7 +467,7 @@ def test_equils(equil_domain_pair): # --------- eta1-eta3 evaluation --------- results = [] - e2_pt = xp.random.rand() + e2_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1, e2_pt, e3, squeeze_out=True)) @@ -514,7 +515,7 @@ def test_equils(equil_domain_pair): # --------- eta2-eta3 evaluation --------- results = [] - e1_pt = xp.random.rand() + e1_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1_pt, e2, e3, squeeze_out=True)) @@ -608,7 +609,7 @@ def test_equils(equil_domain_pair): # --------- 12 matrix evaluation --------- results = [] - e3_pt = xp.random.rand() + e3_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(mat_12_1, mat_12_2, e3_pt, squeeze_out=True)) @@ -656,7 +657,7 @@ def test_equils(equil_domain_pair): # --------- 13 matrix evaluation --------- results = [] - e2_pt = xp.random.rand() + e2_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(mat_13_1, e2_pt, mat_13_3, squeeze_out=True)) @@ -704,7 
+705,7 @@ def test_equils(equil_domain_pair): # --------- 23 matrix evaluation --------- results = [] - e1_pt = xp.random.rand() + e1_pt = np.random.rand() # scalar functions results.append(eq_mhd.absB0(e1_pt, mat_23_2, mat_23_3, squeeze_out=True)) @@ -847,22 +848,22 @@ def assert_scalar(result, kind, *etas): markers = etas[0] n_p = markers.shape[0] - assert isinstance(result, xp.ndarray) + assert isinstance(result, np.ndarray) assert result.shape == (n_p,) for ip in range(n_p): assert isinstance(result[ip], float) - assert not xp.isnan(result[ip]) + assert not np.isnan(result[ip]) else: # point-wise if kind == "point": assert isinstance(result, float) - assert not xp.isnan(result) + assert not np.isnan(result) # slices else: - assert isinstance(result, xp.ndarray) + assert isinstance(result, np.ndarray) # eta1-array if kind == "e1": @@ -914,27 +915,27 @@ def assert_vector(result, kind, *etas): markers = etas[0] n_p = markers.shape[0] - assert isinstance(result, xp.ndarray) + assert isinstance(result, np.ndarray) assert result.shape == (3, n_p) for c in range(3): for ip in range(n_p): assert isinstance(result[c, ip], float) - assert not xp.isnan(result[c, ip]) + assert not np.isnan(result[c, ip]) else: # point-wise if kind == "point": - assert isinstance(result, xp.ndarray) + assert isinstance(result, np.ndarray) assert result.shape == (3,) for c in range(3): assert isinstance(result[c], float) - assert not xp.isnan(result[c]) + assert not np.isnan(result[c]) # slices else: - assert isinstance(result, xp.ndarray) + assert isinstance(result, np.ndarray) # eta1-array if kind == "e1": diff --git a/src/struphy/fields_background/tests/test_numerical_mhd_equil.py b/src/struphy/fields_background/tests/test_numerical_mhd_equil.py index aa1278d5d..4d34e8352 100644 --- a/src/struphy/fields_background/tests/test_numerical_mhd_equil.py +++ b/src/struphy/fields_background/tests/test_numerical_mhd_equil.py @@ -1,7 +1,7 @@ -import cunumpy as xp import pytest from struphy.fields_background.base import FluidEquilibrium, LogicalMHDequilibrium +from struphy.utils.arrays import xp as np @pytest.mark.parametrize( @@ -50,53 +50,53 @@ def test_transformations(mapping, mhd_equil): num_equil = NumEqTest(domain, proxy) # compare values: - eta1 = xp.random.rand(4) - eta2 = xp.random.rand(5) - eta3 = xp.random.rand(6) + eta1 = np.random.rand(4) + eta2 = np.random.rand(5) + eta3 = np.random.rand(6) - assert xp.allclose(ana_equil.absB0(eta1, eta2, eta3), num_equil.absB0(eta1, eta2, eta3)) + assert np.allclose(ana_equil.absB0(eta1, eta2, eta3), num_equil.absB0(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.bv(eta1, eta2, eta3)[0], num_equil.bv(eta1, eta2, eta3)[0]) - assert xp.allclose(ana_equil.bv(eta1, eta2, eta3)[1], num_equil.bv(eta1, eta2, eta3)[1]) - assert xp.allclose(ana_equil.bv(eta1, eta2, eta3)[2], num_equil.bv(eta1, eta2, eta3)[2]) + assert np.allclose(ana_equil.bv(eta1, eta2, eta3)[0], num_equil.bv(eta1, eta2, eta3)[0]) + assert np.allclose(ana_equil.bv(eta1, eta2, eta3)[1], num_equil.bv(eta1, eta2, eta3)[1]) + assert np.allclose(ana_equil.bv(eta1, eta2, eta3)[2], num_equil.bv(eta1, eta2, eta3)[2]) - assert xp.allclose(ana_equil.b1_1(eta1, eta2, eta3), num_equil.b1_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.b1_2(eta1, eta2, eta3), num_equil.b1_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.b1_3(eta1, eta2, eta3), num_equil.b1_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b1_1(eta1, eta2, eta3), num_equil.b1_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b1_2(eta1, eta2, eta3), 
num_equil.b1_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b1_3(eta1, eta2, eta3), num_equil.b1_3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.b2_1(eta1, eta2, eta3), num_equil.b2_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.b2_2(eta1, eta2, eta3), num_equil.b2_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.b2_3(eta1, eta2, eta3), num_equil.b2_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b2_1(eta1, eta2, eta3), num_equil.b2_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b2_2(eta1, eta2, eta3), num_equil.b2_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.b2_3(eta1, eta2, eta3), num_equil.b2_3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[0], num_equil.unit_bv(eta1, eta2, eta3)[0]) - assert xp.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[1], num_equil.unit_bv(eta1, eta2, eta3)[1]) - assert xp.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[2], num_equil.unit_bv(eta1, eta2, eta3)[2]) + assert np.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[0], num_equil.unit_bv(eta1, eta2, eta3)[0]) + assert np.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[1], num_equil.unit_bv(eta1, eta2, eta3)[1]) + assert np.allclose(ana_equil.unit_bv(eta1, eta2, eta3)[2], num_equil.unit_bv(eta1, eta2, eta3)[2]) - assert xp.allclose(ana_equil.unit_b1_1(eta1, eta2, eta3), num_equil.unit_b1_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_b1_2(eta1, eta2, eta3), num_equil.unit_b1_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_b1_3(eta1, eta2, eta3), num_equil.unit_b1_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b1_1(eta1, eta2, eta3), num_equil.unit_b1_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b1_2(eta1, eta2, eta3), num_equil.unit_b1_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b1_3(eta1, eta2, eta3), num_equil.unit_b1_3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_b2_1(eta1, eta2, eta3), num_equil.unit_b2_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_b2_2(eta1, eta2, eta3), num_equil.unit_b2_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.unit_b2_3(eta1, eta2, eta3), num_equil.unit_b2_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b2_1(eta1, eta2, eta3), num_equil.unit_b2_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b2_2(eta1, eta2, eta3), num_equil.unit_b2_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.unit_b2_3(eta1, eta2, eta3), num_equil.unit_b2_3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.jv(eta1, eta2, eta3)[0], num_equil.jv(eta1, eta2, eta3)[0]) - assert xp.allclose(ana_equil.jv(eta1, eta2, eta3)[1], num_equil.jv(eta1, eta2, eta3)[1]) - assert xp.allclose(ana_equil.jv(eta1, eta2, eta3)[2], num_equil.jv(eta1, eta2, eta3)[2]) + assert np.allclose(ana_equil.jv(eta1, eta2, eta3)[0], num_equil.jv(eta1, eta2, eta3)[0]) + assert np.allclose(ana_equil.jv(eta1, eta2, eta3)[1], num_equil.jv(eta1, eta2, eta3)[1]) + assert np.allclose(ana_equil.jv(eta1, eta2, eta3)[2], num_equil.jv(eta1, eta2, eta3)[2]) - assert xp.allclose(ana_equil.j1_1(eta1, eta2, eta3), num_equil.j1_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.j1_2(eta1, eta2, eta3), num_equil.j1_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.j1_3(eta1, eta2, eta3), num_equil.j1_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j1_1(eta1, eta2, eta3), num_equil.j1_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j1_2(eta1, eta2, eta3), num_equil.j1_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j1_3(eta1, eta2, eta3), num_equil.j1_3(eta1, eta2, eta3)) - assert 
xp.allclose(ana_equil.j2_1(eta1, eta2, eta3), num_equil.j2_1(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.j2_2(eta1, eta2, eta3), num_equil.j2_2(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.j2_3(eta1, eta2, eta3), num_equil.j2_3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j2_1(eta1, eta2, eta3), num_equil.j2_1(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j2_2(eta1, eta2, eta3), num_equil.j2_2(eta1, eta2, eta3)) + assert np.allclose(ana_equil.j2_3(eta1, eta2, eta3), num_equil.j2_3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.p0(eta1, eta2, eta3), num_equil.p0(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.p3(eta1, eta2, eta3), num_equil.p3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.p0(eta1, eta2, eta3), num_equil.p0(eta1, eta2, eta3)) + assert np.allclose(ana_equil.p3(eta1, eta2, eta3), num_equil.p3(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.n0(eta1, eta2, eta3), num_equil.n0(eta1, eta2, eta3)) - assert xp.allclose(ana_equil.n3(eta1, eta2, eta3), num_equil.n3(eta1, eta2, eta3)) + assert np.allclose(ana_equil.n0(eta1, eta2, eta3), num_equil.n0(eta1, eta2, eta3)) + assert np.allclose(ana_equil.n3(eta1, eta2, eta3), num_equil.n3(eta1, eta2, eta3)) class NumEqTest(LogicalMHDequilibrium): diff --git a/src/struphy/geometry/base.py b/src/struphy/geometry/base.py index d2b21688e..2a3141e96 100644 --- a/src/struphy/geometry/base.py +++ b/src/struphy/geometry/base.py @@ -3,7 +3,6 @@ from abc import ABCMeta, abstractmethod -import cunumpy as xp import h5py from scipy.sparse import csc_matrix, kron from scipy.sparse.linalg import splu, spsolve @@ -12,6 +11,7 @@ from struphy.geometry import evaluation_kernels, transform_kernels from struphy.kernel_arguments.pusher_args_kernels import DomainArguments from struphy.linear_algebra import linalg_kron +from struphy.utils.arrays import xp as np class Domain(metaclass=ABCMeta): @@ -56,12 +56,12 @@ def __init__( self._NbaseN = [Nel + p - kind * p for Nel, p, kind in zip(Nel, p, spl_kind)] - el_b = [xp.linspace(0.0, 1.0, Nel + 1) for Nel in Nel] + el_b = [np.linspace(0.0, 1.0, Nel + 1) for Nel in Nel] self._T = [bsp.make_knots(el_b, p, kind) for el_b, p, kind in zip(el_b, p, spl_kind)] self._indN = [ - (xp.indices((Nel, p + 1))[1] + xp.arange(Nel)[:, None]) % NbaseN + (np.indices((Nel, p + 1))[1] + np.arange(Nel)[:, None]) % NbaseN for Nel, p, NbaseN in zip(Nel, p, self._NbaseN) ] @@ -71,15 +71,15 @@ def __init__( self._p = (*self._p, 0) self._NbaseN = self._NbaseN + [0] - self._T = self._T + [xp.zeros((1,), dtype=float)] + self._T = self._T + [np.zeros((1,), dtype=float)] - self._indN = self._indN + [xp.zeros((1, 1), dtype=int)] + self._indN = self._indN + [np.zeros((1, 1), dtype=int)] # create dummy attributes for analytical mappings if self.kind_map >= 10: - self._cx = xp.zeros((1, 1, 1), dtype=float) - self._cy = xp.zeros((1, 1, 1), dtype=float) - self._cz = xp.zeros((1, 1, 1), dtype=float) + self._cx = np.zeros((1, 1, 1), dtype=float) + self._cy = np.zeros((1, 1, 1), dtype=float) + self._cz = np.zeros((1, 1, 1), dtype=float) self._transformation_ids = { "pull": 0, @@ -120,7 +120,7 @@ def __init__( self._args_domain = DomainArguments( self.kind_map, self.params_numpy, - xp.array(self.p), + np.array(self.p), self.T[0], self.T[1], self.T[2], @@ -165,15 +165,15 @@ def params(self, new): self._params = new @property - def params_numpy(self) -> xp.ndarray: + def params_numpy(self) -> np.ndarray: """Mapping parameters as numpy array (can be empty).""" if not hasattr(self, "_params_numpy"): - self._params_numpy = 
xp.array([0], dtype=float) + self._params_numpy = np.array([0], dtype=float) return self._params_numpy @params_numpy.setter def params_numpy(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.ndim == 1 self._params_numpy = new @@ -768,7 +768,7 @@ def _evaluate_metric_coefficient(self, *etas, which=0, **kwargs): markers = etas[0] # to keep C-ordering the (3, 3)-part is in the last indices - out = xp.empty((markers.shape[0], 3, 3), dtype=float) + out = np.empty((markers.shape[0], 3, 3), dtype=float) n_inside = evaluation_kernels.kernel_evaluate_pic( markers, @@ -780,24 +780,24 @@ def _evaluate_metric_coefficient(self, *etas, which=0, **kwargs): ) # move the (3, 3)-part to front - out = xp.transpose(out, axes=(1, 2, 0)) + out = np.transpose(out, axes=(1, 2, 0)) # remove holes out = out[:, :, :n_inside] if transposed: - out = xp.transpose(out, axes=(1, 0, 2)) + out = np.transpose(out, axes=(1, 0, 2)) # change size of "out" depending on which metric coeff has been evaluated if which == 0 or which == -1: out = out[:, 0, :] if change_out_order: - out = xp.transpose(out, axes=(1, 0)) + out = np.transpose(out, axes=(1, 0)) elif which == 2: out = out[0, 0, :] else: if change_out_order: - out = xp.transpose(out, axes=(2, 0, 1)) + out = np.transpose(out, axes=(2, 0, 1)) # tensor-product/slice evaluation else: @@ -809,7 +809,7 @@ def _evaluate_metric_coefficient(self, *etas, which=0, **kwargs): ) # to keep C-ordering the (3, 3)-part is in the last indices - out = xp.empty( + out = np.empty( (E1.shape[0], E2.shape[1], E3.shape[2], 3, 3), dtype=float, ) @@ -825,20 +825,20 @@ def _evaluate_metric_coefficient(self, *etas, which=0, **kwargs): ) # move the (3, 3)-part to front - out = xp.transpose(out, axes=(3, 4, 0, 1, 2)) + out = np.transpose(out, axes=(3, 4, 0, 1, 2)) if transposed: - out = xp.transpose(out, axes=(1, 0, 2, 3, 4)) + out = np.transpose(out, axes=(1, 0, 2, 3, 4)) if which == 0: out = out[:, 0, :, :, :] if change_out_order: - out = xp.transpose(out, axes=(1, 2, 3, 0)) + out = np.transpose(out, axes=(1, 2, 3, 0)) elif which == 2: out = out[0, 0, :, :, :] else: if change_out_order: - out = xp.transpose(out, axes=(2, 3, 4, 0, 1)) + out = np.transpose(out, axes=(2, 3, 4, 0, 1)) # remove singleton dimensions for slice evaluation if squeeze_out: @@ -903,7 +903,7 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa assert len(etas) == 3 assert etas[0].shape == etas[1].shape == etas[2].shape assert etas[0].ndim == 1 - markers = xp.stack(etas, axis=1) + markers = np.stack(etas, axis=1) else: markers = etas[0] @@ -955,7 +955,7 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa A_has_holes = False # call evaluation kernel - out = xp.empty((markers.shape[0], 3), dtype=float) + out = np.empty((markers.shape[0], 3), dtype=float) # make sure we don't have stride = 0 A = A.copy() @@ -971,7 +971,7 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa ) # move the (3, 3)-part to front - out = xp.transpose(out, axes=(1, 0)) + out = np.transpose(out, axes=(1, 0)) # remove holes out = out[:, :n_inside] @@ -985,7 +985,7 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa out = out[0, :] else: if change_out_order: - out = xp.transpose(out, axes=(1, 0)) + out = np.transpose(out, axes=(1, 0)) # tensor-product/slice evaluation else: @@ -1012,7 +1012,7 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa A = 
Domain.prepare_arg(a, X[0], X[1], X[2], a_kwargs=a_kwargs) # call evaluation kernel - out = xp.empty( + out = np.empty( (E1.shape[0], E2.shape[1], E3.shape[2], 3), dtype=float, ) @@ -1029,14 +1029,14 @@ def _pull_push_transform(self, which, a, kind_fun, *etas, flat_eval=False, **kwa ) # move the (3, 3)-part to front - out = xp.transpose(out, axes=(3, 0, 1, 2)) + out = np.transpose(out, axes=(3, 0, 1, 2)) # change output order if kind_int < 10: out = out[0, :, :, :] else: if change_out_order: - out = xp.transpose(out, axes=(1, 2, 3, 0)) + out = np.transpose(out, axes=(1, 2, 3, 0)) # remove singleton dimensions for slice evaluation if squeeze_out: @@ -1083,22 +1083,22 @@ def prepare_eval_pts(x, y, z, flat_eval=False): if flat_eval: # convert list type data to numpy array: if isinstance(x, list): - arg_x = xp.array(x) - elif isinstance(x, xp.ndarray): + arg_x = np.array(x) + elif isinstance(x, np.ndarray): arg_x = x else: raise ValueError("Input x must be a 1d list or numpy array") if isinstance(y, list): - arg_y = xp.array(y) - elif isinstance(y, xp.ndarray): + arg_y = np.array(y) + elif isinstance(y, np.ndarray): arg_y = y else: raise ValueError("Input y must be a 1d list or numpy array") if isinstance(z, list): - arg_z = xp.array(z) - elif isinstance(z, xp.ndarray): + arg_z = np.array(z) + elif isinstance(z, np.ndarray): arg_z = z else: raise ValueError("Input z must be a 1d list or numpy array") @@ -1117,56 +1117,56 @@ def prepare_eval_pts(x, y, z, flat_eval=False): else: # convert list type data to numpy array: if isinstance(x, float): - arg_x = xp.array([x]) + arg_x = np.array([x]) elif isinstance(x, int): - arg_x = xp.array([float(x)]) + arg_x = np.array([float(x)]) elif isinstance(x, list): - arg_x = xp.array(x) - elif isinstance(x, xp.ndarray): + arg_x = np.array(x) + elif isinstance(x, np.ndarray): arg_x = x.copy() else: raise ValueError(f"data type {type(x)} not supported") if isinstance(y, float): - arg_y = xp.array([y]) + arg_y = np.array([y]) elif isinstance(y, int): - arg_y = xp.array([float(y)]) + arg_y = np.array([float(y)]) elif isinstance(y, list): - arg_y = xp.array(y) - elif isinstance(y, xp.ndarray): + arg_y = np.array(y) + elif isinstance(y, np.ndarray): arg_y = y.copy() else: raise ValueError(f"data type {type(y)} not supported") if isinstance(z, float): - arg_z = xp.array([z]) + arg_z = np.array([z]) elif isinstance(z, int): - arg_z = xp.array([float(z)]) + arg_z = np.array([float(z)]) elif isinstance(z, list): - arg_z = xp.array(z) - elif isinstance(z, xp.ndarray): + arg_z = np.array(z) + elif isinstance(z, np.ndarray): arg_z = z.copy() else: raise ValueError(f"data type {type(z)} not supported") # tensor-product for given three 1D arrays if arg_x.ndim == 1 and arg_y.ndim == 1 and arg_z.ndim == 1: - E1, E2, E3 = xp.meshgrid(arg_x, arg_y, arg_z, indexing="ij") + E1, E2, E3 = np.meshgrid(arg_x, arg_y, arg_z, indexing="ij") # given xy-plane at point z: elif arg_x.ndim == 2 and arg_y.ndim == 2 and arg_z.size == 1: E1 = arg_x[:, :, None] E2 = arg_y[:, :, None] - E3 = arg_z * xp.ones(E1.shape) + E3 = arg_z * np.ones(E1.shape) # given xz-plane at point y: elif arg_x.ndim == 2 and arg_y.size == 1 and arg_z.ndim == 2: E1 = arg_x[:, None, :] - E2 = arg_y * xp.ones(E1.shape) + E2 = arg_y * np.ones(E1.shape) E3 = arg_z[:, None, :] # given yz-plane at point x: elif arg_x.size == 1 and arg_y.ndim == 2 and arg_z.ndim == 2: E2 = arg_y[None, :, :] E3 = arg_z[None, :, :] - E1 = arg_x * xp.ones(E2.shape) + E1 = arg_x * np.ones(E2.shape) # given three 3D arrays elif arg_x.ndim == 3 and 
arg_y.ndim == 3 and arg_z.ndim == 3: # Distinguish if input coordinates are from sparse or dense meshgrid. @@ -1224,7 +1224,7 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): # float (point-wise, scalar function) if isinstance(a_in, float): - a_out = xp.array([[[[a_in]]]]) + a_out = np.array([[[[a_in]]]]) # single callable: # scalar function -> must return a 3d array for 3d evaluation points @@ -1237,7 +1237,7 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): else: if is_sparse_meshgrid: a_out = a_in( - *xp.meshgrid(Xs[0][:, 0, 0], Xs[1][0, :, 0], Xs[2][0, 0, :], indexing="ij"), + *np.meshgrid(Xs[0][:, 0, 0], Xs[1][0, :, 0], Xs[2][0, 0, :], indexing="ij"), **a_kwargs, ) else: @@ -1245,7 +1245,7 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): # case of Field.__call__ if isinstance(a_out, list): - a_out = xp.array(a_out) + a_out = np.array(a_out) if a_out.ndim == 3: a_out = a_out[None, :, :, :] @@ -1273,7 +1273,7 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): if is_sparse_meshgrid: a_out += [ component( - *xp.meshgrid( + *np.meshgrid( Xs[0][:, 0, 0], Xs[1][0, :, 0], Xs[2][0, 0, :], @@ -1285,25 +1285,25 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): else: a_out += [component(*Xs, **a_kwargs)] - elif isinstance(component, xp.ndarray): + elif isinstance(component, np.ndarray): if flat_eval: - assert component.ndim == 1, print(f"{component.ndim =}") + assert component.ndim == 1, print(f"{component.ndim = }") else: - assert component.ndim == 3, print(f"{component.ndim =}") + assert component.ndim == 3, print(f"{component.ndim = }") a_out += [component] elif isinstance(component, float): - a_out += [xp.array([component])[:, None, None]] + a_out += [np.array([component])[:, None, None]] - a_out = xp.array(a_out, dtype=float) + a_out = np.array(a_out, dtype=float) # numpy array: # 1d array (flat_eval=True and scalar input or flat_eval=False and length 1 (scalar) or length 3 (vector)) # 2d array (flat_eval=True and vector-valued input of shape (3,:)) # 3d array (flat_eval=False and scalar input) # 4d array (flat_eval=False and vector-valued input of shape (3,:,:,:)) - elif isinstance(a_in, xp.ndarray): + elif isinstance(a_in, np.ndarray): if flat_eval: if a_in.ndim == 1: a_out = a_in[None, :] @@ -1312,7 +1312,7 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): else: raise ValueError( "Input array a_in must be either 1d (scalar) or \ - 2d (vector-valued, shape (3,:)) for flat evaluation!", + 2d (vector-valued, shape (3,:)) for flat evaluation!" ) else: @@ -1331,39 +1331,39 @@ def prepare_arg(a_in, *Xs, is_sparse_meshgrid=False, a_kwargs={}): else: raise ValueError( "Input array a_in must be either 3d (scalar) or \ - 4d (vector-valued, shape (3,:,:,:)) for non-flat evaluation!", + 4d (vector-valued, shape (3,:,:,:)) for non-flat evaluation!" ) else: raise TypeError( "Argument a must be either a float OR a list/tuple of 1 or 3 callable(s)/numpy array(s)/float(s) \ - OR a single numpy array OR a single callable!", + OR a single numpy array OR a single callable!" 
) # make sure that output array is 2d and of shape (:, 1) or (:, 3) for flat evaluation if flat_eval: assert a_out.ndim == 2 assert a_out.shape[0] == 1 or a_out.shape[0] == 3 - a_out = xp.ascontiguousarray(xp.transpose(a_out, axes=(1, 0))).copy() # Make sure we don't have stride 0 + a_out = np.ascontiguousarray(np.transpose(a_out, axes=(1, 0))).copy() # Make sure we don't have stride 0 # make sure that output array is 4d and of shape (:,:,:, 1) or (:,:,:, 3) for tensor-product/slice evaluation else: assert a_out.ndim == 4 assert a_out.shape[0] == 1 or a_out.shape[0] == 3 - a_out = xp.ascontiguousarray( - xp.transpose(a_out, axes=(1, 2, 3, 0)), + a_out = np.ascontiguousarray( + np.transpose(a_out, axes=(1, 2, 3, 0)), ).copy() # Make sure we don't have stride 0 return a_out # ================================ - def get_params_numpy(self) -> xp.ndarray: + def get_params_numpy(self) -> np.ndarray: """Convert parameter dict into numpy array.""" params_numpy = [] for k, v in self.params.items(): params_numpy.append(v) - return xp.array(params_numpy) + return np.array(params_numpy) def show( self, @@ -1414,12 +1414,12 @@ def show( # plot domain without MPI decomposition and high resolution if grid_info is None: - e1 = xp.linspace(0.0, 1.0, 16) - e2 = xp.linspace(0.0, 1.0, 65) + e1 = np.linspace(0.0, 1.0, 16) + e2 = np.linspace(0.0, 1.0, 65) if logical: - E1, E2 = xp.meshgrid(e1, e2, indexing="ij") - X = xp.stack((E1, E2), axis=0) + E1, E2 = np.meshgrid(e1, e2, indexing="ij") + X = np.stack((E1, E2), axis=0) else: XYZ = self(e1, e2, 0.0, squeeze_out=True) @@ -1459,11 +1459,11 @@ def show( ) # top view - e3 = xp.linspace(0.0, 1.0, 65) + e3 = np.linspace(0.0, 1.0, 65) if logical: - E1, E2 = xp.meshgrid(e1, e2, indexing="ij") - X = xp.stack((E1, E2), axis=0) + E1, E2 = np.meshgrid(e1, e2, indexing="ij") + X = np.stack((E1, E2), axis=0) else: theta_0 = self(e1, 0.0, e3, squeeze_out=True) theta_pi = self(e1, 0.5, e3, squeeze_out=True) @@ -1524,7 +1524,7 @@ def show( # coordinates # e3 = [0., .25, .5, .75] # x, y, z = self(e1, e2, e3) - # R = xp.sqrt(x**2 + y**2) + # R = np.sqrt(x**2 + y**2) # fig = plt.figure(figsize=(13, 13)) # for n in range(4): @@ -1551,14 +1551,14 @@ def show( elif isinstance(grid_info, list): assert len(grid_info) > 1 - e1 = xp.linspace(0.0, 1.0, grid_info[0] + 1) - e2 = xp.linspace(0.0, 1.0, grid_info[1] + 1) + e1 = np.linspace(0.0, 1.0, grid_info[0] + 1) + e2 = np.linspace(0.0, 1.0, grid_info[1] + 1) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1) if logical: - E1, E2 = xp.meshgrid(e1, e2, indexing="ij") + E1, E2 = np.meshgrid(e1, e2, indexing="ij") # eta1-isolines for i in range(e1.size): @@ -1586,7 +1586,7 @@ def show( ax.plot(X[co1, :, j], X[co2, :, j], "tab:blue", alpha=0.5) # plot domain with MPI decomposition - elif isinstance(grid_info, xp.ndarray): + elif isinstance(grid_info, np.ndarray): assert grid_info.ndim == 2 assert grid_info.shape[1] > 5 @@ -1594,7 +1594,7 @@ def show( ax = fig.add_subplot(1, 1, 1) for i in range(grid_info.shape[0]): - e1 = xp.linspace( + e1 = np.linspace( grid_info[i, 0], grid_info[i, 1], int( @@ -1602,7 +1602,7 @@ def show( ) + 1, ) - e2 = xp.linspace( + e2 = np.linspace( grid_info[i, 3], grid_info[i, 4], int( @@ -1612,7 +1612,7 @@ def show( ) if logical: - E1, E2 = xp.meshgrid(e1, e2, indexing="ij") + E1, E2 = np.meshgrid(e1, e2, indexing="ij") # eta1-isolines first_line = ax.plot( @@ -1737,7 +1737,7 @@ def show( ax.axis("equal") - if isinstance(grid_info, xp.ndarray): + if isinstance(grid_info, np.ndarray): plt.legend() if 
self.__class__.__name__ in torus_mappings: @@ -1772,9 +1772,9 @@ def __init__( Nel: tuple[int] = (8, 24, 6), p: tuple[int] = (2, 3, 1), spl_kind: tuple[bool] = (False, True, True), - cx: xp.ndarray = None, - cy: xp.ndarray = None, - cz: xp.ndarray = None, + cx: np.ndarray = None, + cy: np.ndarray = None, + cz: np.ndarray = None, ): self.kind_map = 0 @@ -1805,7 +1805,7 @@ def __init__( assert self.cz.shape == expected_shape # identify polar singularity at eta1=0 - if xp.all(self.cx[0, :, 0] == self.cx[0, 0, 0]): + if np.all(self.cx[0, :, 0] == self.cx[0, 0, 0]): self.pole = True else: self.pole = False @@ -1836,17 +1836,17 @@ def __init__( Nel: tuple[int] = (8, 24), p: tuple[int] = (2, 3), spl_kind: tuple[bool] = (False, True), - cx: xp.ndarray = None, - cy: xp.ndarray = None, + cx: np.ndarray = None, + cy: np.ndarray = None, ): # get default control points if cx is None or cy is None: def X(eta1, eta2): - return eta1 * xp.cos(2 * xp.pi * eta2) + 3.0 + return eta1 * np.cos(2 * np.pi * eta2) + 3.0 def Y(eta1, eta2): - return eta1 * xp.sin(2 * xp.pi * eta2) + return eta1 * np.sin(2 * np.pi * eta2) cx, cy = interp_mapping(Nel, p, spl_kind, X, Y) @@ -1869,7 +1869,7 @@ def Y(eta1, eta2): assert self.cy.shape == expected_shape # identify polar singularity at eta1=0 - if xp.all(self.cx[0, :] == self.cx[0, 0]): + if np.all(self.cx[0, :] == self.cx[0, 0]): self.pole = True else: self.pole = False @@ -1877,7 +1877,7 @@ def Y(eta1, eta2): # reshape control points to 3D self._cx = self.cx[:, :, None] self._cy = self.cy[:, :, None] - self._cz = xp.zeros((1, 1, 1), dtype=float) + self._cz = np.zeros((1, 1, 1), dtype=float) # init base class super().__init__(Nel=Nel, p=p, spl_kind=spl_kind) @@ -1902,8 +1902,8 @@ def __init__( Nel: tuple[int] = (8, 24), p: tuple[int] = (2, 3), spl_kind: tuple[bool] = (False, True), - cx: xp.ndarray = None, - cy: xp.ndarray = None, + cx: np.ndarray = None, + cy: np.ndarray = None, Lz: float = 4.0, ): self.kind_map = 1 @@ -1912,10 +1912,10 @@ def __init__( if cx is None or cy is None: def X(eta1, eta2): - return eta1 * xp.cos(2 * xp.pi * eta2) + return eta1 * np.cos(2 * np.pi * eta2) def Y(eta1, eta2): - return eta1 * xp.sin(2 * xp.pi * eta2) + return eta1 * np.sin(2 * np.pi * eta2) cx, cy = interp_mapping(Nel, p, spl_kind, X, Y) @@ -1923,7 +1923,7 @@ def Y(eta1, eta2): cx[0] = 0.0 cy[0] = 0.0 - self.params_numpy = xp.array([Lz]) + self.params_numpy = np.array([Lz]) self.periodic_eta3 = False # init base class @@ -1954,7 +1954,7 @@ class PoloidalSplineTorus(PoloidalSpline): spl_kind : tuple[bool] Kind of spline in each poloidal direction (True=periodic, False=clamped). - cx, cy : xp.ndarray + cx, cy : np.ndarray Control points (spline coefficients) of the poloidal mapping. If None, a default square-to-disc mapping of radius 1 centered around (x, y) = (3, 0) is interpolated. 
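For orientation: when ``cx``/``cy`` are left at ``None``, the default control points are interpolated from the square-to-disc maps defined in the hunks right below. A minimal sketch of that path, using only names and defaults visible in this patch (``interp_mapping`` from ``struphy.geometry.base`` and the ``PoloidalSplineTorus`` defaults ``Nel=(8, 24)``, ``p=(2, 3)``, ``spl_kind=(False, True)``)::

    from struphy.geometry.base import interp_mapping
    from struphy.utils.arrays import xp as np

    Nel, p, spl_kind = (8, 24), (2, 3), (False, True)  # clamped in eta1, periodic in eta2

    def X(eta1, eta2):
        # square-to-disc map of radius 1, centered at (x, y) = (3, 0)
        return eta1 * np.cos(2 * np.pi * eta2) + 3.0

    def Y(eta1, eta2):
        return eta1 * np.sin(2 * np.pi * eta2)

    # spline coefficients (control points) of the interpolated poloidal mapping
    cx, cy = interp_mapping(Nel, p, spl_kind, X, Y)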
@@ -1967,23 +1967,23 @@ def __init__( Nel: tuple[int] = (8, 24), p: tuple[int] = (2, 3), spl_kind: tuple[bool] = (False, True), - cx: xp.ndarray = None, - cy: xp.ndarray = None, + cx: np.ndarray = None, + cy: np.ndarray = None, tor_period: int = 3, ): # use setters for mapping attributes self.kind_map = 2 - self.params_numpy = xp.array([float(tor_period)]) + self.params_numpy = np.array([float(tor_period)]) self.periodic_eta3 = True # get default control points if cx is None or cy is None: def X(eta1, eta2): - return eta1 * xp.cos(2 * xp.pi * eta2) + 3.0 + return eta1 * np.cos(2 * np.pi * eta2) + 3.0 def Y(eta1, eta2): - return eta1 * xp.sin(2 * xp.pi * eta2) + return eta1 * np.sin(2 * np.pi * eta2) cx, cy = interp_mapping(Nel, p, spl_kind, X, Y) @@ -2025,7 +2025,7 @@ def interp_mapping(Nel, p, spl_kind, X, Y, Z=None): NbaseN = [Nel + p - kind * p for Nel, p, kind in zip(Nel, p, spl_kind)] # element boundaries - el_b = [xp.linspace(0.0, 1.0, Nel + 1) for Nel in Nel] + el_b = [np.linspace(0.0, 1.0, Nel + 1) for Nel in Nel] # spline knot vectors T = [bsp.make_knots(el_b, p, kind) for el_b, p, kind in zip(el_b, p, spl_kind)] @@ -2040,7 +2040,7 @@ def interp_mapping(Nel, p, spl_kind, X, Y, Z=None): if len(Nel) == 2: I = kron(I_mat[0], I_mat[1], format="csc") - I_pts = xp.meshgrid(I_pts[0], I_pts[1], indexing="ij") + I_pts = np.meshgrid(I_pts[0], I_pts[1], indexing="ij") cx = spsolve(I, X(I_pts[0], I_pts[1]).flatten()).reshape( NbaseN[0], @@ -2073,7 +2073,7 @@ def interp_mapping(Nel, p, spl_kind, X, Y, Z=None): return 0.0 -def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: xp.ndarray): +def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: np.ndarray): """n-dimensional tensor-product spline interpolation with discrete input. The interpolation points are passed as a list of 1d arrays, each array with increasing entries g[0]=0 < g[1] < ... @@ -2095,7 +2095,7 @@ def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: xp. Returns -------- - coeffs : xp.array + coeffs : np.array spline coefficients as nd array. T : list[array] @@ -2110,11 +2110,11 @@ def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: xp. I_mat = [] I_LU = [] for sh, x_grid, p_i, kind_i in zip(values.shape, grids_1d, p, spl_kind): - assert isinstance(x_grid, xp.ndarray) + assert isinstance(x_grid, np.ndarray) assert sh == x_grid.size assert ( - xp.all( - xp.roll(x_grid, 1)[1:] < x_grid[1:], + np.all( + np.roll(x_grid, 1)[1:] < x_grid[1:], ) and x_grid[-1] > x_grid[-2] ) @@ -2122,17 +2122,17 @@ def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: xp. if kind_i: assert x_grid[-1] < 1.0, "Interpolation points must be <1 for periodic interpolation." - breaks = xp.ones(x_grid.size + 1) + breaks = np.ones(x_grid.size + 1) if p_i % 2 == 0: - breaks[1:-1] = (x_grid[1:] + xp.roll(x_grid, 1)[1:]) / 2.0 + breaks[1:-1] = (x_grid[1:] + np.roll(x_grid, 1)[1:]) / 2.0 breaks[0] = 0.0 else: breaks[:-1] = x_grid else: assert ( - xp.abs( + np.abs( x_grid[-1] - 1.0, ) < 1e-14 @@ -2149,12 +2149,12 @@ def spline_interpolation_nd(p: list, spl_kind: list, grids_1d: list, values: xp. 
breaks[0] = 0.0 breaks[-1] = 1.0 - # breaks = xp.linspace(0., 1., x_grid.size - (not kind_i)*p_i + 1) + # breaks = np.linspace(0., 1., x_grid.size - (not kind_i)*p_i + 1) T += [bsp.make_knots(breaks, p_i, periodic=kind_i)] indN += [ - (xp.indices((breaks.size - 1, p_i + 1))[1] + xp.arange(breaks.size - 1)[:, None]) % x_grid.size, + (np.indices((breaks.size - 1, p_i + 1))[1] + np.arange(breaks.size - 1)[:, None]) % x_grid.size, ] I_mat += [bsp.collocation_matrix(T[-1], p_i, x_grid, periodic=kind_i)] diff --git a/src/struphy/geometry/domains.py b/src/struphy/geometry/domains.py index 20f995779..02bad971f 100644 --- a/src/struphy/geometry/domains.py +++ b/src/struphy/geometry/domains.py @@ -2,8 +2,6 @@ import copy -import cunumpy as xp - from struphy.fields_background.base import AxisymmMHDequilibrium from struphy.fields_background.equils import EQDSKequilibrium from struphy.geometry.base import ( @@ -14,6 +12,7 @@ interp_mapping, ) from struphy.geometry.utilities import field_line_tracing +from struphy.utils.arrays import xp as np class Tokamak(PoloidalSplineTorus): @@ -157,8 +156,8 @@ def __init__(self, gvec_equil=None): def XYZ(e1, e2, e3): rho = _rmin + e1 * (1.0 - _rmin) - theta = 2 * xp.pi * e2 - zeta = 2 * xp.pi * e3 / gvec_equil._nfp + theta = 2 * np.pi * e2 + zeta = 2 * np.pi * e3 / gvec_equil._nfp if gvec_equil.params["use_boozer"]: ev = gvec.EvaluationsBoozer(rho=rho, theta_B=theta, zeta_B=zeta, state=gvec_equil.state) else: @@ -287,10 +286,10 @@ def __init__( # get control points def X(eta1, eta2): - return a * eta1 * xp.cos(2 * xp.pi * eta2) + return a * eta1 * np.cos(2 * np.pi * eta2) def Y(eta1, eta2): - return a * eta1 * xp.sin(2 * xp.pi * eta2) + return a * eta1 * np.sin(2 * np.pi * eta2) spl_kind = (False, True) @@ -364,17 +363,17 @@ def __init__( if sfl: def theta(eta1, eta2): - return 2 * xp.arctan(xp.sqrt((1 + a * eta1 / R0) / (1 - a * eta1 / R0)) * xp.tan(xp.pi * eta2)) + return 2 * np.arctan(np.sqrt((1 + a * eta1 / R0) / (1 - a * eta1 / R0)) * np.tan(np.pi * eta2)) else: def theta(eta1, eta2): - return 2 * xp.pi * eta2 + return 2 * np.pi * eta2 def R(eta1, eta2): - return a * eta1 * xp.cos(theta(eta1, eta2)) + R0 + return a * eta1 * np.cos(theta(eta1, eta2)) + R0 def Z(eta1, eta2): - return a * eta1 * xp.sin(theta(eta1, eta2)) + return a * eta1 * np.sin(theta(eta1, eta2)) spl_kind = (False, True) @@ -412,15 +411,29 @@ class Cuboid(Domain): l1 : float Start of x-interval (default: 0.). r1 : float - End of x-interval, r1>l1 (default: 1.). + End of x-interval, r1>l1 (default: 2.). l2 : float Start of y-interval (default: 0.). r2 : float - End of y-interval, r2>l2 (default: 1.). + End of y-interval, r2>l2 (default: 3.). l3 : float Start of z-interval (default: 0.). r3 : float - End of z-interval, r3>l3 (default: 1.). + End of z-interval, r3>l3 (default: 6.). + + Note + ---- + In the parameter .yml, use the following in the section `geometry`:: + + geometry : + type : Cuboid + Cuboid : + l1 : 0. # start of x-interval + r1 : 2. # end of x-interval, r1>l1 + l2 : 0. # start of y-interval + r2 : 3. # end of y-interval, r2>l2 + l3 : 0. # start of z-interval + r3 : 6. # end of z-interval, r3>l3 """ def __init__( @@ -743,11 +756,11 @@ self.params = copy.deepcopy(locals()) self.params_numpy = self.get_params_numpy() - assert a2 <= R0, f"The minor radius must be smaller or equal than the major radius! {a2 =}, {R0 =}" + assert a2 <= R0, f"The minor radius must be smaller than or equal to the major radius! 
{a2 = }, {R0 = }" if sfl: assert pol_period == 1, ( "Piece-of-cake is only implemented for torus coordinates, not for straight field line coordinates!" ) # periodicity in eta3-direction and pole at eta1=0 @@ -763,24 +776,24 @@ def inverse_map(self, x, y, z, bounded=True, change_out_order=False): """Analytical inverse map of HollowTorus""" - mr = xp.sqrt(x**2 + y**2) - self.params["R0"] + mr = np.sqrt(x**2 + y**2) - self.params["R0"] - eta3 = xp.arctan2(-y, x) % (2 * xp.pi / self.params["tor_period"]) / (2 * xp.pi) * self.params["tor_period"] - eta2 = xp.arctan2(z, mr) % (2 * xp.pi / self.params["pol_period"]) / (2 * xp.pi / self.params["pol_period"]) - eta1 = (z / xp.sin(2 * xp.pi * eta2 / self.params["pol_period"]) - self.params["a1"]) / ( + eta3 = np.arctan2(-y, x) % (2 * np.pi / self.params["tor_period"]) / (2 * np.pi) * self.params["tor_period"] + eta2 = np.arctan2(z, mr) % (2 * np.pi / self.params["pol_period"]) / (2 * np.pi / self.params["pol_period"]) + eta1 = (z / np.sin(2 * np.pi * eta2 / self.params["pol_period"]) - self.params["a1"]) / ( self.params["a2"] - self.params["a1"] ) if bounded: eta1[eta1 > 1] = 1.0 eta1[eta1 < 0] = 0.0 - assert xp.all(xp.logical_and(eta1 >= 0, eta1 <= 1)) + assert np.all(np.logical_and(eta1 >= 0, eta1 <= 1)) - assert xp.all(xp.logical_and(eta2 >= 0, eta2 <= 1)) - assert xp.all(xp.logical_and(eta3 >= 0, eta3 <= 1)) + assert np.all(np.logical_and(eta2 >= 0, eta2 <= 1)) + assert np.all(np.logical_and(eta3 >= 0, eta3 <= 1)) if change_out_order: - return xp.transpose((eta1, eta2, eta3)) + return np.transpose((eta1, eta2, eta3)) else: return eta1, eta2, eta3 diff --git a/src/struphy/geometry/evaluation_kernels.py b/src/struphy/geometry/evaluation_kernels.py index 4f97b9ce9..a357bbea1 100644 --- a/src/struphy/geometry/evaluation_kernels.py +++ b/src/struphy/geometry/evaluation_kernels.py @@ -31,7 +31,7 @@ def f( args: DomainArguments Arguments for the mapping. - f_out : xp.array + f_out : np.array Output array of shape (3,). """ @@ -196,7 +196,7 @@ def df( args: DomainArguments Arguments for the mapping. - df_out : xp.array + df_out : np.array Output array of shape (3, 3). """ @@ -354,7 +354,7 @@ def det_df( args: DomainArguments Arguments for the mapping. - tmp1 : xp.array + tmp1 : np.array Temporary array of shape (3, 3). """ @@ -388,13 +388,13 @@ def df_inv( args: DomainArguments Arguments for the mapping. - tmp1: xp.array + tmp1: np.array Temporary array of shape (3, 3). avoid_round_off: bool Whether to manually set exact zeros in arrays. - dfinv_out: xp.array + dfinv_out: np.array Output array of shape (3, 3). """ @@ -484,13 +484,13 @@ def g( args: DomainArguments Arguments for the mapping. - tmp1, tmp2: xp.array + tmp1, tmp2: np.array Temporary arrays of shape (3, 3). avoid_round_off: bool Whether to manually set exact zeros in arrays. - g_out: xp.array + g_out: np.array Output array of shape (3, 3). """ df( @@ -601,13 +601,13 @@ def g_inv( args: DomainArguments Arguments for the mapping. - tmp1, tmp2, tmp3: xp.array + tmp1, tmp2, tmp3: np.array Temporary arrays of shape (3, 3). avoid_round_off: bool Whether to manually set exact zeros in arrays. - ginv_out: xp.array + ginv_out: np.array Output array of shape (3, 3). """ g( @@ -732,16 +732,16 @@ def select_metric_coeff( args: DomainArguments Arguments for the mapping. - tmp0: xp.array + tmp0: np.array Temporary array of shape (3,). 
- tmp1, tmp2, tmp3: xp.array + tmp1, tmp2, tmp3: np.array Temporary arrays of shape (3, 3). avoid_round_off: bool Whether to manually set exact zeros in arrays. - out: xp.array + out: np.array Output array of shape (3, 3). """ # identity map diff --git a/src/struphy/geometry/mappings_kernels.py b/src/struphy/geometry/mappings_kernels.py index 83e6275fd..8b643855d 100644 --- a/src/struphy/geometry/mappings_kernels.py +++ b/src/struphy/geometry/mappings_kernels.py @@ -49,40 +49,13 @@ def spline_3d( tmp3 = ind3[span3 - int(p[2]), :] f_out[0] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cx, + int(p[0]), int(p[1]), int(p[2]), b1, b2, b3, tmp1, tmp2, tmp3, args.cx ) f_out[1] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cy, + int(p[0]), int(p[1]), int(p[2]), b1, b2, b3, tmp1, tmp2, tmp3, args.cy ) f_out[2] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cz, + int(p[0]), int(p[1]), int(p[2]), b1, b2, b3, tmp1, tmp2, tmp3, args.cz ) @@ -124,112 +97,31 @@ def spline_3d_df( tmp3 = ind3[span3 - int(p[2]), :] df_out[0, 0] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - der1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cx, + int(p[0]), int(p[1]), int(p[2]), der1, b2, b3, tmp1, tmp2, tmp3, args.cx ) df_out[0, 1] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - der2, - b3, - tmp1, - tmp2, - tmp3, - args.cx, + int(p[0]), int(p[1]), int(p[2]), b1, der2, b3, tmp1, tmp2, tmp3, args.cx ) df_out[0, 2] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - der3, - tmp1, - tmp2, - tmp3, - args.cx, + int(p[0]), int(p[1]), int(p[2]), b1, b2, der3, tmp1, tmp2, tmp3, args.cx ) df_out[1, 0] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - der1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cy, + int(p[0]), int(p[1]), int(p[2]), der1, b2, b3, tmp1, tmp2, tmp3, args.cy ) df_out[1, 1] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - der2, - b3, - tmp1, - tmp2, - tmp3, - args.cy, + int(p[0]), int(p[1]), int(p[2]), b1, der2, b3, tmp1, tmp2, tmp3, args.cy ) df_out[1, 2] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - der3, - tmp1, - tmp2, - tmp3, - args.cy, + int(p[0]), int(p[1]), int(p[2]), b1, b2, der3, tmp1, tmp2, tmp3, args.cy ) df_out[2, 0] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - der1, - b2, - b3, - tmp1, - tmp2, - tmp3, - args.cz, + int(p[0]), int(p[1]), int(p[2]), der1, b2, b3, tmp1, tmp2, tmp3, args.cz ) df_out[2, 1] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - der2, - b3, - tmp1, - tmp2, - tmp3, - args.cz, + int(p[0]), int(p[1]), int(p[2]), b1, der2, b3, tmp1, tmp2, tmp3, args.cz ) df_out[2, 2] = evaluation_kernels_3d.evaluation_kernel_3d( - int(p[0]), - int(p[1]), - int(p[2]), - b1, - b2, - der3, - tmp1, - tmp2, - tmp3, - args.cz, + int(p[0]), int(p[1]), int(p[2]), b1, b2, der3, tmp1, tmp2, tmp3, args.cz ) @@ -384,7 +276,7 @@ def spline_2d_torus( tmp2 = ind2[span2 - int(p[1]), :] f_out[0] = evaluation_kernels_2d.evaluation_kernel_2d(int(p[0]), int(p[1]), b1, b2, tmp1, tmp2, cx) * cos( - 2 * pi * eta3 / 
tor_period, + 2 * pi * eta3 / tor_period ) f_out[1] = ( evaluation_kernels_2d.evaluation_kernel_2d(int(p[0]), int(p[1]), b1, b2, tmp1, tmp2, cx) @@ -437,10 +329,10 @@ def spline_2d_torus_df( tmp2 = ind2[span2 - int(p[1]), :] df_out[0, 0] = evaluation_kernels_2d.evaluation_kernel_2d(int(p[0]), int(p[1]), der1, b2, tmp1, tmp2, cx) * cos( - 2 * pi * eta3 / tor_period, + 2 * pi * eta3 / tor_period ) df_out[0, 1] = evaluation_kernels_2d.evaluation_kernel_2d(int(p[0]), int(p[1]), b1, der2, tmp1, tmp2, cx) * cos( - 2 * pi * eta3 / tor_period, + 2 * pi * eta3 / tor_period ) df_out[0, 2] = ( evaluation_kernels_2d.evaluation_kernel_2d(int(p[0]), int(p[1]), b1, b2, tmp1, tmp2, cx) @@ -729,14 +621,7 @@ def hollow_cyl_df(eta1: float, eta2: float, a1: float, a2: float, lz: float, poc @pure def powered_ellipse( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - s: float, - f_out: "float[:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, s: float, f_out: "float[:]" ): r""" Point-wise evaluation of @@ -779,14 +664,7 @@ def powered_ellipse( @pure def powered_ellipse_df( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - s: float, - df_out: "float[:,:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, s: float, df_out: "float[:,:]" ): """Jacobian matrix for :meth:`struphy.geometry.mappings_kernels.powered_ellipse`.""" @@ -965,14 +843,7 @@ def hollow_torus_df( @pure def shafranov_shift( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - de: float, - f_out: "float[:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, de: float, f_out: "float[:]" ): r""" Point-wise evaluation of @@ -1016,14 +887,7 @@ def shafranov_shift( @pure def shafranov_shift_df( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - de: float, - df_out: "float[:,:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, de: float, df_out: "float[:,:]" ): """Jacobian matrix for :meth:`struphy.geometry.mappings_kernels.shafranov_shift`.""" @@ -1040,14 +904,7 @@ def shafranov_shift_df( @pure def shafranov_sqrt( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - de: float, - f_out: "float[:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, de: float, f_out: "float[:]" ): r""" Point-wise evaluation of @@ -1089,14 +946,7 @@ def shafranov_sqrt( @pure def shafranov_sqrt_df( - eta1: float, - eta2: float, - eta3: float, - rx: float, - ry: float, - lz: float, - de: float, - df_out: "float[:,:]", + eta1: float, eta2: float, eta3: float, rx: float, ry: float, lz: float, de: float, df_out: "float[:,:]" ): """Jacobian matrix for :meth:`struphy.geometry.mappings_kernels.shafranov_sqrt`.""" diff --git a/src/struphy/geometry/tests/test_domain.py b/src/struphy/geometry/tests/test_domain.py index c9a489331..48348958b 100644 --- a/src/struphy/geometry/tests/test_domain.py +++ b/src/struphy/geometry/tests/test_domain.py @@ -4,9 +4,8 @@ def test_prepare_arg(): """Tests prepare_arg static method in domain base class.""" - import cunumpy as xp - from struphy.geometry.base import Domain + from struphy.utils.arrays import xp as np def a1(e1, e2, e3): return e1 * e2 @@ -22,12 +21,12 @@ def a_vec(e1, e2, e3): a_2 = e2 * e3 a_3 = e3 * e1 - return xp.stack((a_1, a_2, a_3), axis=0) + return np.stack((a_1, a_2, a_3), axis=0) # ========== tensor-product/slice evaluation 
=============== - e1 = xp.random.rand(4) - e2 = xp.random.rand(5) - e3 = xp.random.rand(6) + e1 = np.random.rand(4) + e2 = np.random.rand(5) + e3 = np.random.rand(6) E1, E2, E3, is_sparse_meshgrid = Domain.prepare_eval_pts(e1, e2, e3, flat_eval=False) @@ -85,7 +84,7 @@ def a_vec(e1, e2, e3): assert Domain.prepare_arg([A1, A2, A3], E1, E2, E3).shape == shape_vector # ============== markers evaluation ========================== - markers = xp.random.rand(10, 6) + markers = np.random.rand(10, 6) shape_scalar = (markers.shape[0], 1) shape_vector = (markers.shape[0], 3) @@ -159,16 +158,15 @@ def a_vec(e1, e2, e3): def test_evaluation_mappings(mapping): """Tests domain object creation with default parameters and evaluation of metric coefficients.""" - import cunumpy as xp - from struphy.geometry import domains from struphy.geometry.base import Domain + from struphy.utils.arrays import xp as np # arrays: - arr1 = xp.linspace(0.0, 1.0, 4) - arr2 = xp.linspace(0.0, 1.0, 5) - arr3 = xp.linspace(0.0, 1.0, 6) - arrm = xp.random.rand(10, 8) + arr1 = np.linspace(0.0, 1.0, 4) + arr2 = np.linspace(0.0, 1.0, 5) + arr3 = np.linspace(0.0, 1.0, 6) + arrm = np.random.rand(10, 8) print() print('Testing "evaluate"...') print("array shapes:", arr1.shape, arr2.shape, arr3.shape, arrm.shape) @@ -264,9 +262,9 @@ def test_evaluation_mappings(mapping): assert domain.metric_inv(arr1, arr2, arr3).shape == (3, 3) + arr1.shape + arr2.shape + arr3.shape # matrix evaluations at one point in third direction - mat12_x, mat12_y = xp.meshgrid(arr1, arr2, indexing="ij") - mat13_x, mat13_z = xp.meshgrid(arr1, arr3, indexing="ij") - mat23_y, mat23_z = xp.meshgrid(arr2, arr3, indexing="ij") + mat12_x, mat12_y = np.meshgrid(arr1, arr2, indexing="ij") + mat13_x, mat13_z = np.meshgrid(arr1, arr3, indexing="ij") + mat23_y, mat23_z = np.meshgrid(arr2, arr3, indexing="ij") # eta1-eta2 matrix evaluation: print("eta1-eta2 matrix evaluation, shape:", domain(mat12_x, mat12_y, 0.5, squeeze_out=True).shape) @@ -296,7 +294,7 @@ def test_evaluation_mappings(mapping): assert domain.metric_inv(0.5, mat23_y, mat23_z, squeeze_out=True).shape == (3, 3) + mat23_y.shape # matrix evaluations for sparse meshgrid - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) print("sparse meshgrid matrix evaluation, shape:", domain(mat_x, mat_y, mat_z).shape) assert domain(mat_x, mat_y, mat_z).shape == (3,) + (mat_x.shape[0], mat_y.shape[1], mat_z.shape[2]) assert domain.jacobian(mat_x, mat_y, mat_z).shape == (3, 3) + (mat_x.shape[0], mat_y.shape[1], mat_z.shape[2]) @@ -306,7 +304,7 @@ def test_evaluation_mappings(mapping): assert domain.metric_inv(mat_x, mat_y, mat_z).shape == (3, 3) + (mat_x.shape[0], mat_y.shape[1], mat_z.shape[2]) # matrix evaluations - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij") + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij") print("matrix evaluation, shape:", domain(mat_x, mat_y, mat_z).shape) assert domain(mat_x, mat_y, mat_z).shape == (3,) + mat_x.shape assert domain.jacobian(mat_x, mat_y, mat_z).shape == (3, 3) + mat_x.shape @@ -319,24 +317,23 @@ def test_evaluation_mappings(mapping): def test_pullback(): """Tests pullbacks to p-forms.""" - import cunumpy as xp - from struphy.geometry import domains from struphy.geometry.base import Domain + from struphy.utils.arrays import xp as np # arrays: - arr1 = xp.linspace(0.0, 1.0, 4) - arr2 = xp.linspace(0.0, 1.0, 5) - arr3 = xp.linspace(0.0, 
1.0, 6) + arr1 = np.linspace(0.0, 1.0, 4) + arr2 = np.linspace(0.0, 1.0, 5) + arr3 = np.linspace(0.0, 1.0, 6) print() print('Testing "pull"...') print("array shapes:", arr1.shape, arr2.shape, arr3.shape) - markers = xp.random.rand(13, 6) + markers = np.random.rand(13, 6) # physical function to pull back (used as components of forms too): def fun(x, y, z): - return xp.exp(x) * xp.sin(y) * xp.cos(z) + return np.exp(x) * np.sin(y) * np.cos(z) domain_class = getattr(domains, "Colella") domain = domain_class() @@ -424,9 +421,9 @@ def fun(x, y, z): ) # matrix pullbacks at one point in third direction - mat12_x, mat12_y = xp.meshgrid(arr1, arr2, indexing="ij") - mat13_x, mat13_z = xp.meshgrid(arr1, arr3, indexing="ij") - mat23_y, mat23_z = xp.meshgrid(arr2, arr3, indexing="ij") + mat12_x, mat12_y = np.meshgrid(arr1, arr2, indexing="ij") + mat13_x, mat13_z = np.meshgrid(arr1, arr3, indexing="ij") + mat23_y, mat23_z = np.meshgrid(arr2, arr3, indexing="ij") # eta1-eta2 matrix pullback: if p_str == "0" or p_str == "3": @@ -453,7 +450,7 @@ def fun(x, y, z): ) # matrix pullbacks for sparse meshgrid - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) if p_str == "0" or p_str == "3": assert domain.pull(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == ( mat_x.shape[0], @@ -469,7 +466,7 @@ def fun(x, y, z): ) # matrix pullbacks - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij") + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij") if p_str == "0" or p_str == "3": assert domain.pull(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == mat_x.shape else: @@ -479,24 +476,23 @@ def fun(x, y, z): def test_pushforward(): """Tests pushforward of p-forms.""" - import cunumpy as xp - from struphy.geometry import domains from struphy.geometry.base import Domain + from struphy.utils.arrays import xp as np # arrays: - arr1 = xp.linspace(0.0, 1.0, 4) - arr2 = xp.linspace(0.0, 1.0, 5) - arr3 = xp.linspace(0.0, 1.0, 6) + arr1 = np.linspace(0.0, 1.0, 4) + arr2 = np.linspace(0.0, 1.0, 5) + arr3 = np.linspace(0.0, 1.0, 6) print() print('Testing "push"...') print("array shapes:", arr1.shape, arr2.shape, arr3.shape) - markers = xp.random.rand(13, 6) + markers = np.random.rand(13, 6) # logical function to push (used as components of forms too): def fun(e1, e2, e3): - return xp.exp(e1) * xp.sin(e2) * xp.cos(e3) + return np.exp(e1) * np.sin(e2) * np.cos(e3) domain_class = getattr(domains, "Colella") domain = domain_class() @@ -584,9 +580,9 @@ def fun(e1, e2, e3): ) # matrix pushs at one point in third direction - mat12_x, mat12_y = xp.meshgrid(arr1, arr2, indexing="ij") - mat13_x, mat13_z = xp.meshgrid(arr1, arr3, indexing="ij") - mat23_y, mat23_z = xp.meshgrid(arr2, arr3, indexing="ij") + mat12_x, mat12_y = np.meshgrid(arr1, arr2, indexing="ij") + mat13_x, mat13_z = np.meshgrid(arr1, arr3, indexing="ij") + mat23_y, mat23_z = np.meshgrid(arr2, arr3, indexing="ij") # eta1-eta2 matrix push: if p_str == "0" or p_str == "3": @@ -613,7 +609,7 @@ def fun(e1, e2, e3): ) # matrix pushs for sparse meshgrid - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) if p_str == "0" or p_str == "3": assert domain.push(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == ( mat_x.shape[0], @@ -629,7 +625,7 @@ def fun(e1, e2, e3): ) # matrix pushs - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, 
arr3, indexing="ij") + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij") if p_str == "0" or p_str == "3": assert domain.push(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == mat_x.shape else: @@ -639,24 +635,23 @@ def fun(e1, e2, e3): def test_transform(): """Tests transformation of p-forms.""" - import cunumpy as xp - from struphy.geometry import domains from struphy.geometry.base import Domain + from struphy.utils.arrays import xp as np # arrays: - arr1 = xp.linspace(0.0, 1.0, 4) - arr2 = xp.linspace(0.0, 1.0, 5) - arr3 = xp.linspace(0.0, 1.0, 6) + arr1 = np.linspace(0.0, 1.0, 4) + arr2 = np.linspace(0.0, 1.0, 5) + arr3 = np.linspace(0.0, 1.0, 6) print() print('Testing "transform"...') print("array shapes:", arr1.shape, arr2.shape, arr3.shape) - markers = xp.random.rand(13, 6) + markers = np.random.rand(13, 6) # logical function to push (used as components of forms too): def fun(e1, e2, e3): - return xp.exp(e1) * xp.sin(e2) * xp.cos(e3) + return np.exp(e1) * np.sin(e2) * np.cos(e3) domain_class = getattr(domains, "Colella") domain = domain_class() @@ -756,9 +751,9 @@ def fun(e1, e2, e3): ) # matrix transforms at one point in third direction - mat12_x, mat12_y = xp.meshgrid(arr1, arr2, indexing="ij") - mat13_x, mat13_z = xp.meshgrid(arr1, arr3, indexing="ij") - mat23_y, mat23_z = xp.meshgrid(arr2, arr3, indexing="ij") + mat12_x, mat12_y = np.meshgrid(arr1, arr2, indexing="ij") + mat13_x, mat13_z = np.meshgrid(arr1, arr3, indexing="ij") + mat23_y, mat23_z = np.meshgrid(arr2, arr3, indexing="ij") # eta1-eta2 matrix transform: if p_str == "0_to_3" or p_str == "3_to_0": @@ -794,7 +789,7 @@ def fun(e1, e2, e3): ) # matrix transforms for sparse meshgrid - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij", sparse=True) if p_str == "0_to_3" or p_str == "3_to_0": assert domain.transform(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == ( mat_x.shape[0], @@ -810,7 +805,7 @@ def fun(e1, e2, e3): ) # matrix transforms - mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing="ij") + mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing="ij") if p_str == "0_to_3" or p_str == "3_to_0": assert domain.transform(fun_form, mat_x, mat_y, mat_z, kind=p_str).shape == mat_x.shape else: @@ -822,18 +817,18 @@ def fun(e1, e2, e3): # """ # # from struphy.geometry import domains -# import cunumpy as xp +# from struphy.utils.arrays import xp as np # # # arrays: -# arr1 = xp.linspace(0., 1., 4) -# arr2 = xp.linspace(0., 1., 5) -# arr3 = xp.linspace(0., 1., 6) +# arr1 = np.linspace(0., 1., 4) +# arr2 = np.linspace(0., 1., 5) +# arr3 = np.linspace(0., 1., 6) # print() # print('Testing "transform"...') # print('array shapes:', arr1.shape, arr2.shape, arr3.shape) # # # logical function to tranform (used as components of forms too): -# fun = lambda eta1, eta2, eta3: xp.exp(eta1)*xp.sin(eta2)*xp.cos(eta3) +# fun = lambda eta1, eta2, eta3: np.exp(eta1)*np.sin(eta2)*np.cos(eta3) # # domain_class = getattr(domains, 'Colella') # domain = domain_class() @@ -890,9 +885,9 @@ def fun(e1, e2, e3): # assert a.shape[0] == arr1.size and a.shape[1] == arr2.size and a.shape[2] == arr3.size # # # matrix transformation at one point in third direction -# mat12_x, mat12_y = xp.meshgrid(arr1, arr2, indexing='ij') -# mat13_x, mat13_z = xp.meshgrid(arr1, arr3, indexing='ij') -# mat23_y, mat23_z = xp.meshgrid(arr2, arr3, indexing='ij') +# mat12_x, mat12_y = np.meshgrid(arr1, arr2, indexing='ij') +# mat13_x, mat13_z 
= np.meshgrid(arr1, arr3, indexing='ij') +# mat23_y, mat23_z = np.meshgrid(arr2, arr3, indexing='ij') # # # eta1-eta2 matrix transformation: # a = domain.transform(fun_form, mat12_x, mat12_y, .5, p_str) @@ -908,13 +903,13 @@ def fun(e1, e2, e3): # assert a.shape == mat23_y.shape # # # matrix transformation for sparse meshgrid -# mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing='ij', sparse=True) +# mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing='ij', sparse=True) # a = domain.transform(fun_form, mat_x, mat_y, mat_z, p_str) # #print('sparse meshgrid matrix transformation, shape:', a.shape) # assert a.shape[0] == mat_x.shape[0] and a.shape[1] == mat_y.shape[1] and a.shape[2] == mat_z.shape[2] # # # matrix transformation -# mat_x, mat_y, mat_z = xp.meshgrid(arr1, arr2, arr3, indexing='ij') +# mat_x, mat_y, mat_z = np.meshgrid(arr1, arr2, arr3, indexing='ij') # a = domain.transform(fun_form, mat_x, mat_y, mat_z, p_str) # #print('matrix transformation, shape:', a.shape) # assert a.shape == mat_x.shape diff --git a/src/struphy/geometry/transform_kernels.py b/src/struphy/geometry/transform_kernels.py index f9e6d8077..c95156b96 100644 --- a/src/struphy/geometry/transform_kernels.py +++ b/src/struphy/geometry/transform_kernels.py @@ -54,13 +54,7 @@ @stack_array("dfmat1", "dfmat2") def pull( - a: "float[:]", - eta1: float, - eta2: float, - eta3: float, - kind_fun: int, - args_domain: "DomainArguments", - out: "float[:]", + a: "float[:]", eta1: float, eta2: float, eta3: float, kind_fun: int, args_domain: "DomainArguments", out: "float[:]" ): """ Pull-back of a Cartesian scalar/vector field to a differential p-form. @@ -120,13 +114,7 @@ def pull( @stack_array("dfmat1", "dfmat2", "dfmat3") def push( - a: "float[:]", - eta1: float, - eta2: float, - eta3: float, - kind_fun: int, - args_domain: "DomainArguments", - out: "float[:]", + a: "float[:]", eta1: float, eta2: float, eta3: float, kind_fun: int, args_domain: "DomainArguments", out: "float[:]" ): """ Pushforward of a differential p-forms to a Cartesian scalar/vector field. @@ -184,13 +172,7 @@ def push( @stack_array("dfmat1", "dfmat2", "dfmat3", "vec1", "vec2") def tran( - a: "float[:]", - eta1: float, - eta2: float, - eta3: float, - kind_fun: int, - args_domain: "DomainArguments", - out: "float[:]", + a: "float[:]", eta1: float, eta2: float, eta3: float, kind_fun: int, args_domain: "DomainArguments", out: "float[:]" ): """ Transformations between differential p-forms and/or vector fields. diff --git a/src/struphy/geometry/utilities.py b/src/struphy/geometry/utilities.py index ccc159692..732cf48c0 100644 --- a/src/struphy/geometry/utilities.py +++ b/src/struphy/geometry/utilities.py @@ -1,23 +1,14 @@ -# from __future__ import annotations "Domain-related utility functions." 
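The recurring replacement of ``import cunumpy as xp`` by ``from struphy.utils.arrays import xp as np`` routes every call site through one backend selector. The actual contents of ``struphy/utils/arrays.py`` are not shown in this patch; a minimal sketch of such a selector, assuming it simply falls back to NumPy when no GPU array library is importable, could look like::

    # struphy/utils/arrays.py -- hypothetical sketch, not part of this patch
    try:
        import cupy as xp  # GPU arrays, if available
    except ImportError:
        import numpy as xp  # CPU fallback

Consumers then write ``from struphy.utils.arrays import xp as np``, so existing NumPy-style call sites keep working unchanged on either backend.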
-from typing import Callable - -import cunumpy as xp -import numpy as np - -# from typing import TYPE_CHECKING from scipy.optimize import newton, root, root_scalar from scipy.sparse import csc_matrix from scipy.sparse.linalg import splu from struphy.bsplines import bsplines as bsp - -# if TYPE_CHECKING: -from struphy.geometry.base import Domain, PoloidalSplineTorus +from struphy.geometry.base import PoloidalSplineTorus from struphy.geometry.utilities_kernels import weighted_arc_lengths_flux_surface -from struphy.io.options import GivenInBasis from struphy.linear_algebra.linalg_kron import kron_lusolve_2d +from struphy.utils.arrays import xp as np def field_line_tracing( @@ -129,10 +120,10 @@ def field_line_tracing( Returns ------- - cR : xp.ndarray + cR : np.ndarray Control points (2d) of flux aligned spline mapping (R-component). - cZ : xp.ndarray + cZ : np.ndarray Control points (2d) of flux aligned spline mapping (Z-component). """ @@ -145,8 +136,8 @@ def field_line_tracing( ps, px = p_pre # spline knots - Ts = bsp.make_knots(xp.linspace(0.0, 1.0, ns + 1), ps, False) - Tx = bsp.make_knots(xp.linspace(0.0, 1.0, nx + 1), px, True) + Ts = bsp.make_knots(np.linspace(0.0, 1.0, ns + 1), ps, False) + Tx = bsp.make_knots(np.linspace(0.0, 1.0, nx + 1), px, True) # interpolation (Greville) points s_gr = bsp.greville(Ts, ps, False) @@ -165,13 +156,13 @@ def field_line_tracing( ] # check if pole is included - if xp.abs(psi(psi_axis_R, psi_axis_Z) - psi0) < 1e-14: + if np.abs(psi(psi_axis_R, psi_axis_Z) - psi0) < 1e-14: pole = True else: pole = False - R = xp.zeros((s_gr.size, x_gr.size), dtype=float) - Z = xp.zeros((s_gr.size, x_gr.size), dtype=float) + R = np.zeros((s_gr.size, x_gr.size), dtype=float) + Z = np.zeros((s_gr.size, x_gr.size), dtype=float) # function whose root must be found for j, x in enumerate(x_gr): @@ -188,8 +179,8 @@ def field_line_tracing( # function whose root must be found def f(r): - _R = psi_axis_R + r * xp.cos(2 * xp.pi * x) - _Z = psi_axis_Z + r * xp.sin(2 * xp.pi * x) + _R = psi_axis_R + r * np.cos(2 * np.pi * x) + _Z = psi_axis_Z + r * np.sin(2 * np.pi * x) psi_norm = (psi(_R, _Z) - psi0) / (psi1 - psi0) @@ -200,8 +191,8 @@ def f(r): r_flux_surface = newton(f, x0=r_guess) - R[i, j] = psi_axis_R + r_flux_surface * xp.cos(2 * xp.pi * x) - Z[i, j] = psi_axis_Z + r_flux_surface * xp.sin(2 * xp.pi * x) + R[i, j] = psi_axis_R + r_flux_surface * np.cos(2 * np.pi * x) + Z[i, j] = psi_axis_Z + r_flux_surface * np.sin(2 * np.pi * x) # get control points cR_equal_angle = kron_lusolve_2d(ILUs, R) @@ -227,8 +218,8 @@ def f(r): ps, px = p # spline knots - Ts = bsp.make_knots(xp.linspace(0.0, 1.0, ns + 1), ps, False) - Tx = bsp.make_knots(xp.linspace(0.0, 1.0, nx + 1), px, True) + Ts = bsp.make_knots(np.linspace(0.0, 1.0, ns + 1), ps, False) + Tx = bsp.make_knots(np.linspace(0.0, 1.0, nx + 1), px, True) # interpolation (Greville) points s_gr = bsp.greville(Ts, ps, False) @@ -255,10 +246,10 @@ def f(r): # target function for xi parametrization def f_angles(xis, s_val): - assert xp.all(xp.logical_and(xis > 0.0, xis < 1.0)) + assert np.all(np.logical_and(xis > 0.0, xis < 1.0)) # add 0 and 1 to angles array - xis_extended = xp.array([0.0] + list(xis) + [1.0]) + xis_extended = np.array([0.0] + list(xis) + [1.0]) # compute (R, Z) coordinates for given xis on fixed flux surface corresponding to s_val _RZ = domain_eq_angle(s_val, xis_extended, 0.0) @@ -267,17 +258,17 @@ def f_angles(xis, s_val): _Z = _RZ[2] # |grad(psi)| at xis - gp = xp.sqrt(psi(_R, _Z, dR=1) ** 2 + psi(_R, _Z, dZ=1) ** 2) + 
gp = np.sqrt(psi(_R, _Z, dR=1) ** 2 + psi(_R, _Z, dZ=1) ** 2) # compute weighted arc_lengths between two successive points in xis_extended array - dl = xp.zeros(xis_extended.size - 1, dtype=float) + dl = np.zeros(xis_extended.size - 1, dtype=float) weighted_arc_lengths_flux_surface(_R, _Z, gp, dl, xi_param_dict[xi_param]) # total length of the flux surface - l = xp.sum(dl) + l = np.sum(dl) # cumulative sum of arc lengths, start with 0! - l_cum = xp.cumsum(dl) + l_cum = np.cumsum(dl) # odd spline degree if px % 2 == 1: @@ -289,8 +280,8 @@ def f_angles(xis, s_val): return xi_diff # loop over flux surfaces and find xi parametrization - R = xp.zeros((s_gr.size, x_gr.size), dtype=float) - Z = xp.zeros((s_gr.size, x_gr.size), dtype=float) + R = np.zeros((s_gr.size, x_gr.size), dtype=float) + Z = np.zeros((s_gr.size, x_gr.size), dtype=float) if px % 2 == 1: xis0 = x_gr[1:].copy() @@ -342,10 +333,10 @@ class TransformedPformComponent: Parameters ---------- - fun : Callable | list - Callable function (components). Has to be length three for vector-valued funnctions,. + fun : list + Callable function components. Has to be length three for 1-, 2-forms and vector fields, length one otherwise. - given_in_basis : GivenInBasis + fun_basis : str In which basis fun is represented: either a p-form, then '0' or '3' for scalar and 'v', '1' or '2' for vector-valued, @@ -357,24 +348,19 @@ class TransformedPformComponent: The p-form representation of the output: '0', '1', '2' '3' or 'v'. comp : int - Which component of the vector-valued function to return (=0 for scalars). + Which component of the transformed p-form is returned, 0, 1, or 2 (only needed for vector-valued fun). domain: struphy.geometry.domains All things mapping. If None, the input fun is just evaluated and not transformed at __call__. + + Returns + ------- + out : array[float] + The values of the component comp of fun transformed from fun_basis to out_form. """ - def __init__( - self, - fun: Callable | list, - given_in_basis: GivenInBasis, - out_form: str, - comp: int = 0, - domain: Domain = None, - ): - if isinstance(fun, list): - assert len(fun) == 1 or len(fun) == 3 - else: - fun = [fun] + def __init__(self, fun: list, fun_basis: str, out_form: str, comp=0, domain=None): + assert len(fun) == 1 or len(fun) == 3 self._fun = [] for f in fun: @@ -388,7 +374,7 @@ def f_zero(x, y, z): assert callable(f) self._fun += [f] - self._given_in_basis = given_in_basis + self._fun_basis = fun_basis self._out_form = out_form self._comp = comp self._domain = domain @@ -405,19 +391,19 @@ def f_zero(x, y, z): def __call__(self, eta1, eta2, eta3): """ - Evaluate the component of the transformed p-form specified 'comp'. + Evaluate the component of the transformed p-form specified in self._comp. Depending on the dimension of eta1 either point-wise, tensor-product, slice plane or general (see :ref:`struphy.geometry.base.prepare_arg`). 
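For bases other than the identity and the two physical variants, the transformation key passed to ``domain.transform`` is built by plain string concatenation, as in the ``dict_tran`` assignment below; for example::

    fun_basis, out_form = "1", "2"
    print(fun_basis + "_to_" + out_form)  # '1_to_2'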
""" - if self._given_in_basis == self._out_form or self._domain is None: + if self._fun_basis == self._out_form or self._domain is None: if self._is_scalar: out = self._fun(eta1, eta2, eta3) else: out = self._fun[self._comp](eta1, eta2, eta3) - elif self._given_in_basis == "physical": + elif self._fun_basis == "physical": if self._is_scalar: out = self._domain.pull( self._fun, @@ -435,7 +421,7 @@ def __call__(self, eta1, eta2, eta3): kind=self._out_form, )[self._comp] - elif self._given_in_basis == "physical_at_eta": + elif self._fun_basis == "physical_at_eta": if self._is_scalar: out = self._domain.pull( self._fun, @@ -456,7 +442,7 @@ def __call__(self, eta1, eta2, eta3): )[self._comp] else: - dict_tran = self._given_in_basis + "_to_" + self._out_form + dict_tran = self._fun_basis + "_to_" + self._out_form if self._is_scalar: out = self._domain.transform( diff --git a/src/struphy/geometry/utilities_kernels.py b/src/struphy/geometry/utilities_kernels.py index 1c26b25c5..85ccdbcf9 100644 --- a/src/struphy/geometry/utilities_kernels.py +++ b/src/struphy/geometry/utilities_kernels.py @@ -18,16 +18,16 @@ def weighted_arc_lengths_flux_surface(r: "float[:]", z: "float[:]", grad_psi: "f Parameters ---------- - r : xp.ndarray + r : np.ndarray R coordinates of the flux surface. - z : xp.ndarray + z : np.ndarray Z coordinates of the flux surface. - grad_psi : xp.ndarray + grad_psi : np.ndarray Absolute values of the flux function gradient on the flux surface: |grad(psi)| = sqrt[ (d_R psi)**2 + (d_Z psi)**2 ]. - dwls : xp.ndarray + dwls : np.ndarray The weighted arc lengths will be written into this array. Length must be one smaller than lengths of r, z and grad_psi. kind : int diff --git a/src/struphy/initial/eigenfunctions.py b/src/struphy/initial/eigenfunctions.py index 239ae24a9..172386bdf 100644 --- a/src/struphy/initial/eigenfunctions.py +++ b/src/struphy/initial/eigenfunctions.py @@ -1,11 +1,11 @@ import os -import cunumpy as xp import yaml from psydac.api.discretization import discretize from sympde.topology import Derham, Line from struphy.fields_background.equils import set_defaults +from struphy.utils.arrays import xp as np class InitialMHDAxisymHdivEigFun: @@ -54,11 +54,11 @@ def __init__(self, derham, **params): spec_path = params["spec_abs"] # load eigenvector for velocity field - omega2, U2_eig = xp.split(xp.load(spec_path), [1], axis=0) + omega2, U2_eig = np.split(np.load(spec_path), [1], axis=0) omega2 = omega2.flatten() # find eigenvector corresponding to given squared eigenfrequency range - mode = xp.where((xp.real(omega2) < params["eig_freq_upper"]) & (xp.real(omega2) > params["eig_freq_lower"]))[0] + mode = np.where((np.real(omega2) < params["eig_freq_upper"]) & (np.real(omega2) > params["eig_freq_lower"]))[0] assert mode.size == 1 mode = mode[0] @@ -67,16 +67,13 @@ def __init__(self, derham, **params): nnz_tor = derham.boundary_ops["2"].dim_nz_tor eig_vec_1 = U2_eig[ - 0 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2], - mode, + 0 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2], mode ] eig_vec_2 = U2_eig[ - 1 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 1 * nnz_pol[1] + 0 * nnz_pol[2], - mode, + 1 * nnz_pol[0] + 0 * nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 1 * nnz_pol[1] + 0 * nnz_pol[2], mode ] eig_vec_3 = U2_eig[ - 1 * nnz_pol[0] + 1 * nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 1 * nnz_pol[1] + 1 * nnz_pol[2], - mode, + 1 * nnz_pol[0] + 1 * 
nnz_pol[1] + 0 * nnz_pol[2] : 1 * nnz_pol[0] + 1 * nnz_pol[1] + 1 * nnz_pol[2], mode ] del omega2, U2_eig @@ -92,28 +89,28 @@ def __init__(self, derham, **params): n_tor = int(os.path.split(spec_path)[-1][-6:-4]) - N_cos = p0(lambda phi: xp.cos(2 * xp.pi * n_tor * phi)).coeffs.toarray() - N_sin = p0(lambda phi: xp.sin(2 * xp.pi * n_tor * phi)).coeffs.toarray() + N_cos = p0(lambda phi: np.cos(2 * np.pi * n_tor * phi)).coeffs.toarray() + N_sin = p0(lambda phi: np.sin(2 * np.pi * n_tor * phi)).coeffs.toarray() - D_cos = p1(lambda phi: xp.cos(2 * xp.pi * n_tor * phi)).coeffs.toarray() - D_sin = p1(lambda phi: xp.sin(2 * xp.pi * n_tor * phi)).coeffs.toarray() + D_cos = p1(lambda phi: np.cos(2 * np.pi * n_tor * phi)).coeffs.toarray() + D_sin = p1(lambda phi: np.sin(2 * np.pi * n_tor * phi)).coeffs.toarray() # select real part or imaginary part assert params["kind"] == "r" or params["kind"] == "i" if params["kind"] == "r": - eig_vec_1 = (xp.outer(xp.real(eig_vec_1), D_cos) - xp.outer(xp.imag(eig_vec_1), D_sin)).flatten() - eig_vec_2 = (xp.outer(xp.real(eig_vec_2), D_cos) - xp.outer(xp.imag(eig_vec_2), D_sin)).flatten() - eig_vec_3 = (xp.outer(xp.real(eig_vec_3), N_cos) - xp.outer(xp.imag(eig_vec_3), N_sin)).flatten() + eig_vec_1 = (np.outer(np.real(eig_vec_1), D_cos) - np.outer(np.imag(eig_vec_1), D_sin)).flatten() + eig_vec_2 = (np.outer(np.real(eig_vec_2), D_cos) - np.outer(np.imag(eig_vec_2), D_sin)).flatten() + eig_vec_3 = (np.outer(np.real(eig_vec_3), N_cos) - np.outer(np.imag(eig_vec_3), N_sin)).flatten() else: - eig_vec_1 = (xp.outer(xp.imag(eig_vec_1), D_cos) + xp.outer(xp.real(eig_vec_1), D_sin)).flatten() - eig_vec_2 = (xp.outer(xp.imag(eig_vec_2), D_cos) + xp.outer(xp.real(eig_vec_2), D_sin)).flatten() - eig_vec_3 = (xp.outer(xp.imag(eig_vec_3), N_cos) + xp.outer(xp.real(eig_vec_3), N_sin)).flatten() + eig_vec_1 = (np.outer(np.imag(eig_vec_1), D_cos) + np.outer(np.real(eig_vec_1), D_sin)).flatten() + eig_vec_2 = (np.outer(np.imag(eig_vec_2), D_cos) + np.outer(np.real(eig_vec_2), D_sin)).flatten() + eig_vec_3 = (np.outer(np.imag(eig_vec_3), N_cos) + np.outer(np.real(eig_vec_3), N_sin)).flatten() # set coefficients in full space - eigvec_1_ten = xp.zeros(derham.nbasis["2"][0], dtype=float) - eigvec_2_ten = xp.zeros(derham.nbasis["2"][1], dtype=float) - eigvec_3_ten = xp.zeros(derham.nbasis["2"][2], dtype=float) + eigvec_1_ten = np.zeros(derham.nbasis["2"][0], dtype=float) + eigvec_2_ten = np.zeros(derham.nbasis["2"][1], dtype=float) + eigvec_3_ten = np.zeros(derham.nbasis["2"][2], dtype=float) bc1_1 = derham.dirichlet_bc[0][0] bc1_2 = derham.dirichlet_bc[0][1] @@ -141,19 +138,19 @@ def __init__(self, derham, **params): else: # split into polar/tensor product parts - eig_vec_1 = xp.split( + eig_vec_1 = np.split( eig_vec_1, [ derham.Vh_pol["2"].n_polar[0] * nnz_tor[0], ], ) - eig_vec_2 = xp.split( + eig_vec_2 = np.split( eig_vec_2, [ derham.Vh_pol["2"].n_polar[1] * nnz_tor[1], ], ) - eig_vec_3 = xp.split( + eig_vec_3 = np.split( eig_vec_3, [ derham.Vh_pol["2"].n_polar[2] * nnz_tor[2], @@ -185,7 +182,7 @@ def __init__(self, derham, **params): ] eigvec_1_ten[derham.Vh_pol["2"].n_rings[0] : derham.nbasis["2"][0][0] - bc1_2, :, :] = eig_vec_1[1].reshape( - n_v2_0[0], + n_v2_0[0] ) eigvec_2_ten[derham.Vh_pol["2"].n_rings[1] :, :, :] = eig_vec_2[1].reshape(n_v2_0[1]) eigvec_3_ten[derham.Vh_pol["2"].n_rings[2] :, :, :] = eig_vec_3[1].reshape(n_v2_0[2]) diff --git a/src/struphy/initial/perturbations.py b/src/struphy/initial/perturbations.py index 4c113d02d..9062c42b3 100644 --- 
a/src/struphy/initial/perturbations.py +++ b/src/struphy/initial/perturbations.py @@ -1,48 +1,13 @@ #!/usr/bin/env python3 -"Analytical perturbations." +"Analytical perturbations (modes)." -from dataclasses import dataclass - -import cunumpy as xp import scipy import scipy.special -from struphy.initial.base import Perturbation -from struphy.io.options import GivenInBasis, NoiseDirections, check_option - - -@dataclass -class Noise(Perturbation): - """White noise for FEEC coefficients. - - Parameters - ---------- - direction: str - The direction(s) of variation of the noise: 'e1', 'e2', 'e3', 'e1e2', etc. +from struphy.utils.arrays import xp as np - amp: float - Noise amplitude. - seed: int - Seed for the random number generator. - """ - - direction: NoiseDirections = "e3" - amp: float = 0.0001 - seed: int = None - comp: int = 0 - given_in_basis: GivenInBasis = "0" - - def __post_init__( - self, - ): - check_option(self.direction, NoiseDirections) - - def __call__(self): - pass - - -class ModesSin(Perturbation): +class ModesSin: r"""Sinusoidal function in 3D. .. math:: @@ -61,25 +26,25 @@ \end{aligned} \right. - Can be used in logical space (use 'given_in_basis'), where :math:`x \to \eta_1,\, y\to \eta_2,\, z \to \eta_3` + Can be used in logical space, where :math:`x \to \eta_1,\, y\to \eta_2,\, z \to \eta_3` and :math:`L_x=L_y=L_z=1.0` (default). Parameters ---------- - ls : tuple[int] + ls : tuple | list Mode numbers in x-direction (kx = l*2*pi/Lx). - ms : tuple[int] + ms : tuple | list Mode numbers in y-direction (ky = m*2*pi/Ly). - ns : tuple[int] + ns : tuple | list Mode numbers in z-direction (kz = n*2*pi/Lz). - amps : tuple[float] + amps : tuple | list Amplitude of each mode. theta : tuple | list - Phase of each mode. + Phase of each mode. pfuns : tuple | list[str] "Id" or "localize" define the profile functions. @@ -92,27 +57,43 @@ Lx, Ly, Lz : float Domain lengths. - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + Example of use in a ``.yml`` parameter file:: + + perturbations : + type : ModesSin + ModesSin : + comps : + scalar_name : '0' # choices: null, 'physical', '0', '3' + vector_name : [null , 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + ls : + scalar_name: [1, 3] # two x-modes for scalar variable + vector_name: [null, [0, 1], [4]] # two x-modes for 2nd comp. and one x-mode for third component of vector-valued variable + theta : + scalar_name: [0, 3.1415] + vector_name: [null, [0, 0], [1.5708]] + pfuns : + vector_name: [null, ['localize'], ['Id']] + pfuns_params : + vector_name: [null, ['0.1'], [0.]] + Lx : 7.853981633974483 + Ly : 1. + Lz : 1.
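A standalone sketch of the mode sum above (plain NumPy, trivial profile functions :math:`\chi_s = 1`), handy for checking amplitudes and phases outside of Struphy::

    import numpy as np

    def modes_sin(x, y, z, ls, ms, ns, amps, theta, Lx=1.0, Ly=1.0, Lz=1.0):
        val = 0.0
        for A, l, m, n, t in zip(amps, ls, ms, ns, theta):
            val += A * np.sin(2 * np.pi * (l * x / Lx + m * y / Ly + n * z / Lz) + t)
        return val

    x = np.linspace(0.0, 1.0, 5)
    print(modes_sin(x, 0.0, 0.0, ls=(1, 3), ms=(0, 0), ns=(0, 0),
                    amps=(1e-4, 1e-4), theta=(0.0, 3.1415)))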
""" def __init__( self, - ls: tuple[int] = None, - ms: tuple[int] = None, - ns: tuple[int] = None, - amps: tuple[float] = (1e-4,), - theta: tuple[float] = None, + ls=None, + ms=None, + ns=None, + amps=(1e-4,), + theta=None, pfuns=("Id",), pfuns_params=(0.0,), Lx=1.0, Ly=1.0, Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, ): if ls is not None: n_modes = len(ls) @@ -162,17 +143,6 @@ def __init__( else: assert len(pfuns_params) == n_modes - self._pfuns = [] - for pfun, params in zip(pfuns, pfuns_params): - if pfun == "Id": - self._pfuns += [lambda eta3: 1.0] - elif pfun == "localize": - self._pfuns += [ - lambda eta3: xp.tanh((eta3 - 0.5) / params) / xp.cosh((eta3 - 0.5) / params), - ] - else: - raise ValueError(f"Profile function {pfun} is not defined..") - self._ls = ls self._ms = ms self._ns = ns @@ -182,9 +152,16 @@ def __init__( self._Lz = Lz self._theta = theta - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp + self._pfuns = [] + for pfun, params in zip(pfuns, pfuns_params): + if pfun == "Id": + self._pfuns += [lambda eta3: 1.0] + elif pfun == "localize": + self._pfuns += [ + lambda eta3: np.tanh((eta3 - 0.5) / params) / np.cosh((eta3 - 0.5) / params), + ] + else: + raise ValueError(f"Profile function {pfun} is not defined..") def __call__(self, x, y, z): val = 0.0 @@ -193,10 +170,10 @@ def __call__(self, x, y, z): val += ( amp * pfun(z) - * xp.sin( - l * 2.0 * xp.pi / self._Lx * x - + m * 2.0 * xp.pi / self._Ly * y - + n * 2.0 * xp.pi / self._Lz * z + * np.sin( + l * 2.0 * np.pi / self._Lx * x + + m * 2.0 * np.pi / self._Ly * y + + n * 2.0 * np.pi / self._Lz * z + t, ) ) @@ -204,52 +181,52 @@ def __call__(self, x, y, z): return val -class ModesCos(Perturbation): +class ModesCos: r"""Cosinusoidal function in 3D. .. math:: u(x, y, z) = \sum_{s} A_s \cos \left(l_s \frac{2\pi}{L_x} x + m_s \frac{2\pi}{L_y} y + n_s \frac{2\pi}{L_z} z \right) \,. - Can be used in logical space (use 'given_in_basis'), where :math:`x \to \eta_1,\, y\to \eta_2,\, z \to \eta_3` + Can be used in logical space, where :math:`x \to \eta_1,\, y\to \eta_2,\, z \to \eta_3` and :math:`L_x=L_y=L_z=1.0` (default). Parameters ---------- - ls : tuple[int] + ls : tuple | list Mode numbers in x-direction (kx = l*2*pi/Lx). - ms : tuple[int] + ms : tuple | list Mode numbers in y-direction (ky = m*2*pi/Ly). - ns : tuple[int] + ns : tuple | list Mode numbers in z-direction (kz = n*2*pi/Lz). - amps : tuple[float] + amps : tuple | list Amplitude of each mode. Lx, Ly, Lz : float Domain lengths. - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + Example of use in a ``.yml`` parameter file:: + + perturbations : + type : ModesCos + ModesCos : + comps : + scalar_name : '0' # choices: null, 'physical', '0', '3' + vector_name : [null , 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + ls : + scalar_name: [1, 3] # two x-modes for scalar variable + vector_name: [null, [0, 1], [4]] # two x-modes for 2nd comp. and one x-mode for third component of vector-valued variable + Lx : 7.853981633974483 + Ly : 1. + Lz : 1. 
""" - def __init__( - self, - ls: tuple[int] = None, - ms: tuple[int] = None, - ns: tuple[int] = None, - amps: tuple[float] = (1e-4,), - Lx=1.0, - Ly=1.0, - Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): + def __init__(self, ls=None, ms=None, ns=None, amps=(1e-4,), Lx=1.0, Ly=1.0, Lz=1.0): if ls is not None: n_modes = len(ls) elif ms is not None: @@ -288,22 +265,18 @@ def __init__( self._Ly = Ly self._Lz = Lz - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, x, y, z): val = 0.0 for amp, l, m, n in zip(self._amps, self._ls, self._ms, self._ns): - val += amp * xp.cos( - l * 2.0 * xp.pi / self._Lx * x + m * 2.0 * xp.pi / self._Ly * y + n * 2.0 * xp.pi / self._Lz * z, + val += amp * np.cos( + l * 2.0 * np.pi / self._Lx * x + m * 2.0 * np.pi / self._Ly * y + n * 2.0 * np.pi / self._Lz * z, ) # print( "Cos max value", val.max()) return val -class CoaxialWaveguideElectric_r(Perturbation): +class CoaxialWaveguideElectric_r: r"""Initializes function for Coaxial Waveguide electric field in radial direction. Solutions taken from TUM master thesis of Alicia Robles Pérez: @@ -326,25 +299,21 @@ def __init__(self, m=1, a1=1.0, a2=2.0, a=1, b=-0.28): self._a = a self._b = b - # use the setters - self.given_in_basis = "norm" - self.comp = 0 - def __call__(self, eta1, eta2, eta3): val = 0.0 r = eta1 * (self._r2 - self._r1) + self._r1 - theta = eta2 * 2.0 * xp.pi + theta = eta2 * 2.0 * np.pi val += ( -self._m / r - * xp.cos(self._m * theta) + * np.cos(self._m * theta) * (self._a * scipy.special.jv(self._m, r) + self._b * scipy.special.yn(self._m, r)) ) return val -class CoaxialWaveguideElectric_theta(Perturbation): +class CoaxialWaveguideElectric_theta: r""" Initializes funtion for Coaxial Waveguide electric field in the azimuthal direction. @@ -368,23 +337,19 @@ def __init__(self, m=1, a1=1.0, a2=2.0, a=1, b=-0.28): self._a = a self._b = b - # use the setters - self.given_in_basis = "norm" - self.comp = 1 - def __call__(self, eta1, eta2, eta3): val = 0.0 r = eta1 * (self._r2 - self._r1) + self._r1 - theta = eta2 * 2.0 * xp.pi + theta = eta2 * 2.0 * np.pi val += ( self._a * ((self._m / r) * scipy.special.jv(self._m, r) - scipy.special.jv(self._m + 1, r)) + (self._b * ((self._m / r) * scipy.special.yn(self._m, r) - scipy.special.yn(self._m + 1, r))) - ) * xp.sin(self._m * theta) + ) * np.sin(self._m * theta) return val -class CoaxialWaveguideMagnetic(Perturbation): +class CoaxialWaveguideMagnetic: r"""Initializes funtion for Coaxial Waveguide magnetic field in $z$-direction. Solutions taken from TUM master thesis of Alicia Robles Pérez: @@ -407,23 +372,19 @@ def __init__(self, m=1, a1=1.0, a2=2.0, a=1, b=-0.28): self._a = a self._b = b - # use the setters - self.given_in_basis = "norm" - self.comp = 2 - def __call__(self, eta1, eta2, eta3): val = 0.0 r = eta1 * (self._r2 - self._r1) + self._r1 - theta = eta2 * 2.0 * xp.pi + theta = eta2 * 2.0 * np.pi z = eta3 - val += (self._a * scipy.special.jv(self._m, r) + self._b * scipy.special.yn(self._m, r)) * xp.cos( - self._m * theta, + val += (self._a * scipy.special.jv(self._m, r) + self._b * scipy.special.yn(self._m, r)) * np.cos( + self._m * theta ) return val -class ModesCosCos(Perturbation): +class ModesCosCos: r""" .. 
math:: @@ -448,8 +409,6 @@ def __init__( Lx=1.0, Ly=1.0, Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, ): if ls is not None: n_modes = len(ls) @@ -509,27 +468,23 @@ def __init__( if pfun == "Id": self._pfuns += [lambda z: 1.0] elif pfun == "localize": - self._pfuns += [lambda z, p=params: xp.tanh((z - 0.5) / p) / xp.cosh((z - 0.5) / p)] + self._pfuns += [lambda z, p=params: np.tanh((z - 0.5) / p) / np.cosh((z - 0.5) / p)] else: raise ValueError(f"Profile function {pfun} is not defined..") - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, x, y, z): val = 0.0 for amp, l, m, thx, thy, pfun in zip(self._amps, self._ls, self._ms, self._theta_x, self._theta_y, self._pfuns): val += ( amp * pfun(z) - * xp.cos(l * 2.0 * xp.pi / self._Lx * x + thx) - * xp.cos(m * 2.0 * xp.pi / self._Ly * y + thy) + * np.cos(l * 2.0 * np.pi / self._Lx * x + thx) + * np.cos(m * 2.0 * np.pi / self._Ly * y + thy) ) return val -class ModesSinSin(Perturbation): +class ModesSinSin: r""" .. math:: @@ -553,8 +508,6 @@ def __init__( Lx=1.0, Ly=1.0, Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, ): if ls is not None: n_modes = len(ls) @@ -614,27 +567,23 @@ def __init__( if pfun == "Id": self._pfuns += [lambda z: 1.0] elif pfun == "localize": - self._pfuns += [lambda z, p=params: xp.tanh((z - 0.5) / p) / xp.cosh((z - 0.5) / p)] + self._pfuns += [lambda z, p=params: np.tanh((z - 0.5) / p) / np.cosh((z - 0.5) / p)] else: raise ValueError(f"Profile function {pfun} is not defined..") - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, x, y, z): val = 0.0 for amp, l, m, thx, thy, pfun in zip(self._amps, self._ls, self._ms, self._theta_x, self._theta_y, self._pfuns): val += ( amp * pfun(z) - * xp.sin(l * 2.0 * xp.pi / self._Lx * x + thx) - * xp.sin(m * 2.0 * xp.pi / self._Ly * y + thy) + * np.sin(l * 2.0 * np.pi / self._Lx * x + thx) + * np.sin(m * 2.0 * np.pi / self._Ly * y + thy) ) return val -class ModesSinCos(Perturbation): +class ModesSinCos: r""" .. math:: @@ -658,8 +607,6 @@ def __init__( Lx=1.0, Ly=1.0, Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, ): # number of modes if ls is not None: @@ -721,27 +668,23 @@ def __init__( if pfun == "Id": self._pfuns += [lambda z: 1.0] elif pfun == "localize": - self._pfuns += [lambda z, p=params: xp.tanh((z - 0.5) / p) / xp.cosh((z - 0.5) / p)] + self._pfuns += [lambda z, p=params: np.tanh((z - 0.5) / p) / np.cosh((z - 0.5) / p)] else: raise ValueError(f"Profile function {pfun} is not defined..") - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, x, y, z): val = 0.0 for amp, l, m, thx, thy, pfun in zip(self._amps, self._ls, self._ms, self._theta_x, self._theta_y, self._pfuns): val += ( amp * pfun(z) - * xp.sin(l * 2.0 * xp.pi / self._Lx * x + thx) - * xp.cos(m * 2.0 * xp.pi / self._Ly * y + thy) + * np.sin(l * 2.0 * np.pi / self._Lx * x + thx) + * np.cos(m * 2.0 * np.pi / self._Ly * y + thy) ) return val -class ModesCosSin(Perturbation): +class ModesCosSin: r""" .. 
math:: @@ -765,8 +708,6 @@ def __init__( Lx=1.0, Ly=1.0, Lz=1.0, - given_in_basis: GivenInBasis = "0", - comp: int = 0, ): # number of modes if ls is not None: @@ -828,27 +769,23 @@ def __init__( if pfun == "Id": self._pfuns += [lambda z: 1.0] elif pfun == "localize": - self._pfuns += [lambda z, p=params: xp.tanh((z - 0.5) / p) / xp.cosh((z - 0.5) / p)] + self._pfuns += [lambda z, p=params: np.tanh((z - 0.5) / p) / np.cosh((z - 0.5) / p)] else: raise ValueError(f"Profile function {pfun} is not defined..") - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, x, y, z): val = 0.0 for amp, l, m, thx, thy, pfun in zip(self._amps, self._ls, self._ms, self._theta_x, self._theta_y, self._pfuns): val += ( amp * pfun(z) - * xp.cos(l * 2.0 * xp.pi / self._Lx * x + thx) - * xp.sin(m * 2.0 * xp.pi / self._Ly * y + thy) + * np.cos(l * 2.0 * np.pi / self._Lx * x + thx) + * np.sin(m * 2.0 * np.pi / self._Ly * y + thy) ) return val -class TorusModesSin(Perturbation): +class TorusModesSin: r"""Sinusoidal function in the periodic coordinates of a Torus. .. math:: @@ -869,7 +806,7 @@ \end{aligned} \right. - Can ony be used in logical space (use 'given_in_basis'). + Can only be defined in logical coordinates. Parameters ---------- @@ -888,25 +825,40 @@ class TorusModesSin(Perturbation): pfun_params : tuple | list Provides :math:`[r_0, \sigma]` parameters for each "exp" profile function, and l_s for "sin" and "cos". - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following template in the section ``fluid/``:: + + perturbations : + type : TorusModesSin + TorusModesSin : + comps : + n3 : null # choices: null, 'physical', '0', '3' + u2 : ['physical', 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + p3 : '0' # choices: null, 'physical', '0', '3' + ms : + n3: null # poloidal mode numbers + u2: [[0], [0], [0]] # poloidal mode numbers + p3: [0] # poloidal mode numbers + ns : + n3: null # toroidal mode numbers + u2: [[1], [1], [1]] # toroidal mode numbers + p3: [1] # toroidal mode numbers + amps : + n3: null # amplitudes of each mode + u2: [[0.001], [0.001], [0.001]] # amplitudes of each mode + p3: [0.01] # amplitudes of each mode + pfuns : + n3: null # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + u2: [['sin'], ['sin'], ['exp']] # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + p3: [0.01] # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + pfun_params : + n3: null # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos" + u2: [2, null, [[0.5, 1.]]] # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos" + p3: [0.01] # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos" """ - def __init__( - self, - ms=None, - ns=None, - amps=(1e-4,), - pfuns=("sin",), - pfun_params=None, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis - + def __init__(self, ms=None, ns=None, amps=(1e-4,), pfuns=("sin",), pfun_params=None): if ms is not None: n_modes = len(ms) elif ns is not None: @@ -946,41 +898,37 @@ def __init__( ls = 1 else: ls = params - self._pfuns += [lambda eta1: 
xp.sin(ls * xp.pi * eta1)] + self._pfuns += [lambda eta1: np.sin(ls * np.pi * eta1)] elif pfun == "exp": self._pfuns += [ - lambda eta1: xp.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) - / xp.sqrt(2 * xp.pi * params[1] ** 2), + lambda eta1: np.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) + / np.sqrt(2 * np.pi * params[1] ** 2), ] elif pfun == "d_exp": self._pfuns += [ lambda eta1: -(eta1 - params[0]) / params[1] ** 2 - * xp.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) - / xp.sqrt(2 * xp.pi * params[1] ** 2), + * np.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) + / np.sqrt(2 * np.pi * params[1] ** 2), ] else: raise ValueError(f"Profile function {pfun} is not defined..") - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, eta1, eta2, eta3): val = 0.0 for mi, ni, pfun, amp in zip(self._ms, self._ns, self._pfuns, self._amps): val += ( amp * pfun(eta1) - * xp.sin( - mi * 2.0 * xp.pi * eta2 + ni * 2.0 * xp.pi * eta3, + * np.sin( + mi * 2.0 * np.pi * eta2 + ni * 2.0 * np.pi * eta3, ) ) return val -class TorusModesCos(Perturbation): +class TorusModesCos: r"""Cosinusoidal function in the periodic coordinates of a Torus. .. math:: @@ -1001,44 +949,59 @@ \end{aligned} \right. - Can only be used in logical space (use 'given_in_basis'). + Can only be defined in logical coordinates. Parameters ---------- - ms : tuple[int] + ms : tuple | list[int] Poloidal mode numbers. - ns : tuple[int] + ns : tuple | list[int] Toroidal mode numbers. - pfuns : tuple[str] + pfuns : tuple | list[str] "sin" or "cos" or "exp" to define the profile functions. - amps : tuple[float] + amps : tuple | list[float] Amplitudes of each mode (m_i, n_i). pfun_params : tuple | list Provides :math:`[r_0, \sigma]` parameters for each "exp" profile function, and l_s for "sin" and "cos". - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following template in the section ``fluid/``:: + + perturbations : + type : TorusModesCos + TorusModesCos : + comps : + n3 : null # choices: null, 'physical', '0', '3' + u2 : ['physical', 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + p3 : H1 # choices: null, 'physical', '0', '3' + ms : + n3: null # poloidal mode numbers + u2: [[0], [0], [0]] # poloidal mode numbers + p3: [0] # poloidal mode numbers + ns : + n3: null # toroidal mode numbers + u2: [[1], [1], [1]] # toroidal mode numbers + p3: [1] # toroidal mode numbers + amps : + n3: null # amplitudes of each mode + u2: [[0.001], [0.001], [0.001]] # amplitudes of each mode + p3: [0.01] # amplitudes of each mode + pfuns : + n3: null # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + u2: [['sin'], ['sin'], ['exp']] # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + p3: [0.01] # profile function in eta1-direction ('sin' or 'cos' or 'exp' or 'd_exp') + pfun_params : + n3: null # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos". + u2: [2, null, [[0.5, 1.]]] # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos". + p3: [0.01] # Provides [r_0, sigma] parameters for each "exp" and "d_exp" profile function, and l_s for "sin" and "cos".
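The 'exp' and 'd_exp' profiles are a normalized Gaussian in eta1 and its derivative; written out as standalone functions (a sketch with assumed parameter names ``r0 = params[0]``, ``sigma = params[1]``)::

    import numpy as np

    def pfun_exp(eta1, r0=0.5, sigma=0.1):
        return np.exp(-((eta1 - r0) ** 2) / (2 * sigma**2)) / np.sqrt(2 * np.pi * sigma**2)

    def pfun_d_exp(eta1, r0=0.5, sigma=0.1):
        return -(eta1 - r0) / sigma**2 * pfun_exp(eta1, r0, sigma)

    print(pfun_exp(np.array([0.4, 0.5, 0.6])))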
""" - def __init__( - self, - ms: tuple = (2,), - ns: tuple = (1,), - amps: tuple = (0.1,), - pfuns: tuple = ("sin",), - pfun_params=None, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis - + def __init__(self, ms=None, ns=None, amps=(1e-4,), pfuns=("sin",), pfun_params=None): if ms is not None: n_modes = len(ms) elif ns is not None: @@ -1078,45 +1041,41 @@ def __init__( ls = 1 else: ls = params - self._pfuns += [lambda eta1: xp.sin(ls * xp.pi * eta1)] + self._pfuns += [lambda eta1: np.sin(ls * np.pi * eta1)] elif pfun == "cos": - self._pfuns += [lambda eta1: xp.cos(xp.pi * eta1)] + self._pfuns += [lambda eta1: np.cos(np.pi * eta1)] elif pfun == "exp": self._pfuns += [ - lambda eta1: xp.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) - / xp.sqrt(2 * xp.pi * params[1] ** 2), + lambda eta1: np.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) + / np.sqrt(2 * np.pi * params[1] ** 2), ] elif pfun == "d_exp": self._pfuns += [ lambda eta1: -(eta1 - params[0]) / params[1] ** 2 - * xp.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) - / xp.sqrt(2 * xp.pi * params[1] ** 2), + * np.exp(-((eta1 - params[0]) ** 2) / (2 * params[1] ** 2)) + / np.sqrt(2 * np.pi * params[1] ** 2), ] else: raise ValueError( 'Profile function must be "sin" or "cos" or "exp".', ) - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, eta1, eta2, eta3): val = 0.0 for mi, ni, pfun, amp in zip(self._ms, self._ns, self._pfuns, self._amps): val += ( amp * pfun(eta1) - * xp.cos( - mi * 2.0 * xp.pi * eta2 + ni * 2.0 * xp.pi * eta3, + * np.cos( + mi * 2.0 * np.pi * eta2 + ni * 2.0 * np.pi * eta3, ) ) return val -class Shear_x(Perturbation): +class Shear_x: r"""Double shear layer in eta1 (-1 in outer regions, 1 in inner regions). .. math:: @@ -1133,36 +1092,32 @@ class Shear_x(Perturbation): delta : float Characteristic size of the shear layer - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following in the section ``fluid/``:: + + perturbations : + type : Shear_x + Shear_x : + comps : + rho3 : null # choices: null, 'physical', '0', '3' + uv : ['physical', 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + s3 : H1 # choices: null, 'physical', '0', '3' + amp : 0.001 # amplitudes of each mode + delta : 0.03333 # characteristic size of the shear layer """ - def __init__( - self, - amp=1e-4, - delta=1 / 15, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis, f"Perturbation {self.__name__} can only be used in logical space." - + def __init__(self, amp=1e-4, delta=1 / 15): self._amp = amp self._delta = delta - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, e1, e2, e3): - val = self._amp * (-xp.tanh((e1 - 0.75) / self._delta) + xp.tanh((e1 - 0.25) / self._delta) - 1) + val = self._amp * (-np.tanh((e1 - 0.75) / self._delta) + np.tanh((e1 - 0.25) / self._delta) - 1) return val -class Shear_y(Perturbation): +class Shear_y: r"""Double shear layer in eta2 (-1 in outer regions, 1 in inner regions). .. math:: @@ -1179,36 +1134,32 @@ class Shear_y(Perturbation): delta : float Characteristic size of the shear layer - given_in_basis : str - In which basis the perturbation is represented (see base class). 
- - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following in the section ``fluid/``:: + + perturbations : + type : Shear_y + Shear_y : + comps : + rho3 : null # choices: null, 'physical', '0', '3' + uv : ['physical', 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + s3 : H1 # choices: null, 'physical', '0', '3' + amp : 0.001 # amplitudes of each mode + delta : 0.03333 # characteristic size of the shear layer """ - def __init__( - self, - amp=1e-4, - delta=1 / 15, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis, f"Perturbation {self.__name__} can only be used in logical space." - + def __init__(self, amp=1e-4, delta=1 / 15): self._amp = amp self._delta = delta - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, e1, e2, e3): - val = self._amp * (-xp.tanh((e2 - 0.75) / self._delta) + xp.tanh((e2 - 0.25) / self._delta) - 1) + val = self._amp * (-np.tanh((e2 - 0.75) / self._delta) + np.tanh((e2 - 0.25) / self._delta) - 1) return val -class Shear_z(Perturbation): +class Shear_z: r"""Double shear layer in eta3 (-1 in outer regions, 1 in inner regions). .. math:: @@ -1225,36 +1176,32 @@ class Shear_z(Perturbation): delta : float Characteristic size of the shear layer - given_in_basis : str - In which basis the perturbation is represented (see base class). - - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following in the section ``fluid/``:: + + perturbations : + type : Shear_y + Shear_y : + comps : + rho3 : null # choices: null, 'physical', '0', '3' + uv : ['physical', 'v', '2'] # choices: null, 'physical', '1', '2', 'v', 'norm' + s3 : H1 # choices: null, 'physical', '0', '3' + amp : 0.001 # amplitudes of each mode + delta : 0.03333 # characteristic size of the shear layer """ - def __init__( - self, - amp=1e-4, - delta=1 / 15, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis, f"Perturbation {self.__name__} can only be used in logical space." - + def __init__(self, amp=1e-4, delta=1 / 15): self._amp = amp self._delta = delta - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, e1, e2, e3): - val = self._amp * (-xp.tanh((e3 - 0.75) / self._delta) + xp.tanh((e3 - 0.25) / self._delta) - 1) + val = self._amp * (-np.tanh((e3 - 0.75) / self._delta) + np.tanh((e3 - 0.25) / self._delta) - 1) return val -class Erf_z(Perturbation): +class Erf_z: r"""Shear layer in eta3 (-1 in lower regions, 1 in upper regions). .. math:: @@ -1271,29 +1218,25 @@ class Erf_z(Perturbation): delta : float Characteristic size of the shear layer - given_in_basis : str - In which basis the perturbation is represented (see base class). 
- - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) + Note + ---- + In the parameter .yml, use the following in the section ``fluid/``:: + + perturbations : + type : Erf_z + Erf_z : + comps : + b2 : ['2', null, null] # choices: null, 'physical', '0', '3' + amp : + b2 : [0.001] # amplitudes of each mode + delta : + b2 : [0.02] # characteristic size of the shear layer """ - def __init__( - self, - amp=1e-4, - delta=1 / 15, - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - assert "physical" not in given_in_basis, f"Perturbation {self.__name__} can only be used in logical space." - + def __init__(self, amp=1e-4, delta=1 / 15): self._amp = amp self._delta = delta - # use the setters - self.given_in_basis = given_in_basis - self.comp = comp - def __call__(self, e1, e2, e3): from scipy.special import erf @@ -1302,7 +1245,7 @@ def __call__(self, e1, e2, e3): return val -class RestelliAnalyticSolutionVelocity(Perturbation): +class RestelliAnalyticSolutionVelocity: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -1331,6 +1274,8 @@ class RestelliAnalyticSolutionVelocity(Perturbation): Parameters ---------- + comp : string + Which component of the solution ('0', '1' or '2'). a : float Minor radius of torus (default: 1.). R0 : float @@ -1343,8 +1288,6 @@ class RestelliAnalyticSolutionVelocity(Perturbation): (default: 0.1) beta : float (default: 1.0) - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) References ---------- @@ -1352,16 +1295,8 @@ class RestelliAnalyticSolutionVelocity(Perturbation): in plasma physics, Journal of Computational Physics 2018. """ - def __init__( - self, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - comp: int = 0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): + self._comp = comp self._a = a self._R0 = R0 self._B0 = B0 @@ -1369,16 +1304,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) ustarR = ( self._alpha * R / (self._a * self._R0) * (-z) + self._beta * self._Bp * self._R0 / (self._B0 * self._a * R) * z @@ -1395,20 +1326,20 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: - raise ValueError(f"Invalid component '{self._comp}'. Must be 0, 1, or 2.") + raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") -class RestelliAnalyticSolutionVelocity_2(Perturbation): +class RestelliAnalyticSolutionVelocity_2: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -1437,6 +1368,8 @@ class RestelliAnalyticSolutionVelocity_2(Perturbation): Parameters ---------- + comp : string + Which component of the solution ('0', '1' or '2'). a : float Minor radius of torus (default: 1.). 
R0 : float @@ -1449,8 +1382,6 @@ class RestelliAnalyticSolutionVelocity_2(Perturbation): (default: 0.1) beta : float (default: 1.0) - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) References ---------- @@ -1458,16 +1389,8 @@ class RestelliAnalyticSolutionVelocity_2(Perturbation): in plasma physics, Journal of Computational Physics 2018. """ - def __init__( - self, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - comp: int = 0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): + self._comp = comp self._a = a self._R0 = R0 self._B0 = B0 @@ -1475,16 +1398,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setter - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) ustarR = ( self._alpha * R / (self._a * self._R0) * (-z) + self._beta * self._Bp * self._R0 / (self._B0 * self._a * R) * z @@ -1501,20 +1420,20 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") -class RestelliAnalyticSolutionVelocity_3(Perturbation): +class RestelliAnalyticSolutionVelocity_3: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -1543,6 +1462,8 @@ class RestelliAnalyticSolutionVelocity_3(Perturbation): Parameters ---------- + comp : string + Which component of the solution ('0', '1' or '2'). a : float Minor radius of torus (default: 1.). R0 : float @@ -1555,8 +1476,6 @@ class RestelliAnalyticSolutionVelocity_3(Perturbation): (default: 0.1) beta : float (default: 1.0) - comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) References ---------- @@ -1564,16 +1483,8 @@ class RestelliAnalyticSolutionVelocity_3(Perturbation): in plasma physics, Journal of Computational Physics 2018. 
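All three Restelli velocity classes share the same cylindrical-to-Cartesian conversion in ``__call__``; isolated as a sketch (note the clockwise angle convention ``phi = arctan2(-y, x)`` and the guard against division by zero on the axis), it might also be worth factoring into a common helper::

    import numpy as np

    def cyl_to_cart(uR, uphi, uZ, x, y):
        R = np.sqrt(x**2 + y**2)
        R = np.where(R == 0.0, 1e-9, R)  # avoid division by zero at R = 0
        phi = np.arctan2(-y, x)
        ux = np.cos(phi) * uR - R * np.sin(phi) * uphi
        uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi
        return ux, uy, uZ

    print(cyl_to_cart(1.0, 0.0, 0.0, x=2.0, y=0.0))  # purely radial flow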
""" - def __init__( - self, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - comp: int = 0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): + self._comp = comp self._a = a self._R0 = R0 self._B0 = B0 @@ -1581,16 +1492,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) ustarR = ( self._alpha * R / (self._a * self._R0) * (-z) + self._beta * self._Bp * self._R0 / (self._B0 * self._a * R) * z @@ -1607,20 +1514,20 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") -class RestelliAnalyticSolutionPotential(Perturbation): +class RestelliAnalyticSolutionPotential: r"""Analytic solution :math:`\phi` of the system: .. math:: @@ -1676,19 +1583,16 @@ def __init__(self, a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): self._alpha = alpha self._beta = beta - # use the setter - self.given_in_basis = "physical" - # equilibrium potential def __call__(self, x, y, z): """Equilibrium potential.""" - R = xp.sqrt(x**2 + y**2) + R = np.sqrt(x**2 + y**2) pp = 0.5 * self._a * self._B0 * self._alpha * (((R - self._R0) ** 2 + z**2) / self._a**2 - 2.0 / 3.0) return pp -class ManufacturedSolutionVelocity(Perturbation): +class ManufacturedSolutionVelocity: r"""Analytic solutions :math:`u` and :math:`u_e` of the system: .. math:: @@ -1720,49 +1624,38 @@ class ManufacturedSolutionVelocity(Perturbation): Defines the manufactured solution to be selected ('1D' or '2D'). b0 : float Magnetic field (default: 1.0). 
- comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) """ - def __init__( - self, - species="Ions", - dimension="1D", - b0=1.0, - comp: int = 0, - ): + def __init__(self, species="Ions", comp="0", dimension="1D", b0=1.0): self._b = b0 self._species = species + self._comp = comp self._dimension = dimension - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): if self._species == "Ions": """Velocity of ions.""" """x component""" if self._dimension == "2D": - ux = -xp.sin(2 * xp.pi * x) * xp.sin(2 * xp.pi * y) + ux = -np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y) elif self._dimension == "1D": - ux = xp.sin(2 * xp.pi * x) + 1.0 + ux = np.sin(2 * np.pi * x) + 1.0 """y component""" if self._dimension == "2D": - uy = -xp.cos(2 * xp.pi * x) * xp.cos(2 * xp.pi * y) + uy = -np.cos(2 * np.pi * x) * np.cos(2 * np.pi * y) elif self._dimension == "1D": - uy = xp.cos(2 * xp.pi * x) + uy = np.cos(2 * np.pi * x) """z component""" uz = 0.0 * x - if self.comp == 0: + if self._comp == "0": return ux - elif self.comp == 1: + elif self._comp == "1": return uy - elif self.comp == 2: + elif self._comp == "2": return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") @@ -1771,24 +1664,24 @@ def __call__(self, x, y, z): """Velocity of electrons.""" """x component""" if self._dimension == "2D": - ux = -xp.sin(4 * xp.pi * x) * xp.sin(4 * xp.pi * y) + ux = -np.sin(4 * np.pi * x) * np.sin(4 * np.pi * y) elif self._dimension == "1D": - ux = xp.sin(2.0 * xp.pi * x) + ux = np.sin(2.0 * np.pi * x) """y component""" if self._dimension == "2D": - uy = -xp.cos(4 * xp.pi * x) * xp.cos(4 * xp.pi * y) + uy = -np.cos(4 * np.pi * x) * np.cos(4 * np.pi * y) elif self._dimension == "1D": - uy = xp.cos(2 * xp.pi * x) + uy = np.cos(2 * np.pi * x) """z component""" uz = 0.0 * x - if self.comp == 0: + if self._comp == "0": return ux - if self.comp == 1: + if self._comp == "1": return uy - if self.comp == 2: + if self._comp == "2": return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") @@ -1797,7 +1690,7 @@ def __call__(self, x, y, z): raise ValueError(f"Invalid species '{self._species}'. Must be 'Ions' or 'Electrons'.") -class ManufacturedSolutionPotential(Perturbation): +class ManufacturedSolutionPotential: r"""Analytic solution :math:`\phi` of the system: .. math:: @@ -1837,21 +1730,18 @@ def __init__(self, dimension="1D", b0=1.0): self._ab = b0 self._dimension = dimension - # use the setter - self.given_in_basis = "physical" - # equilibrium ion velocity def __call__(self, x, y, z): """Potential.""" if self._dimension == "2D": - phi = xp.cos(2 * xp.pi * x) + xp.sin(2 * xp.pi * y) + phi = np.cos(2 * np.pi * x) + np.sin(2 * np.pi * y) elif self._dimension == "1D": - phi = xp.sin(2.0 * xp.pi * x) + phi = np.sin(2.0 * np.pi * x) return phi -class ManufacturedSolutionVelocity_2(Perturbation): +class ManufacturedSolutionVelocity_2: r"""Analytic solutions :math:`u` and :math:`u_e` of the system: .. math:: @@ -1877,53 +1767,44 @@ class ManufacturedSolutionVelocity_2(Perturbation): ---------- species : string 'Ions' or 'Electrons'. + comp : string + Which component of the solution ('0', '1' or '2'). dimension: string Defines the manufactured solution to be selected ('1D' or '2D'). b0 : float Magnetic field (default: 1.0). 
- comp : int - Which component (0, 1 or 2) of vector is perturbed (=0 for scalar-valued functions) """ - def __init__( - self, - species="Ions", - dimension="1D", - b0=1.0, - comp: int = 0, - ): + def __init__(self, species="Ions", comp="0", dimension="1D", b0=1.0): self._b = b0 self._species = species + self._comp = comp self._dimension = dimension - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): if self._species == "Ions": """Velocity of ions.""" """x component""" if self._dimension == "2D": - ux = -xp.sin(2 * xp.pi * x) * xp.sin(2 * xp.pi * y) + ux = -np.sin(2 * np.pi * x) * np.sin(2 * np.pi * y) elif self._dimension == "1D": - ux = xp.sin(2 * xp.pi * x) + 1.0 + ux = np.sin(2 * np.pi * x) + 1.0 """y component""" if self._dimension == "2D": - uy = -xp.cos(2 * xp.pi * x) * xp.cos(2 * xp.pi * y) + uy = -np.cos(2 * np.pi * x) * np.cos(2 * np.pi * y) elif self._dimension == "1D": - uy = xp.cos(2 * xp.pi * x) + uy = np.cos(2 * np.pi * x) """z component""" uz = 0.0 * x - if self.comp == 0: + if self._comp == "0": return ux - elif self.comp == 1: + elif self._comp == "1": return uy - elif self.comp == 2: + elif self._comp == "2": return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") @@ -1932,24 +1813,24 @@ def __call__(self, x, y, z): """Velocity of electrons.""" """x component""" if self._dimension == "2D": - ux = -xp.sin(4 * xp.pi * x) * xp.sin(4 * xp.pi * y) + ux = -np.sin(4 * np.pi * x) * np.sin(4 * np.pi * y) elif self._dimension == "1D": - ux = xp.sin(2.0 * xp.pi * x) + ux = np.sin(2.0 * np.pi * x) """y component""" if self._dimension == "2D": - uy = -xp.cos(4 * xp.pi * x) * xp.cos(4 * xp.pi * y) + uy = -np.cos(4 * np.pi * x) * np.cos(4 * np.pi * y) elif self._dimension == "1D": - uy = xp.cos(2 * xp.pi * x) + uy = np.cos(2 * np.pi * x) """z component""" uz = 0.0 * x - if self.comp == 0: + if self._comp == "0": return ux - if self.comp == 1: + if self._comp == "1": return uy - if self.comp == 2: + if self._comp == "2": return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") @@ -1958,58 +1839,7 @@ def __call__(self, x, y, z): raise ValueError(f"Invalid species '{self._species}'. Must be 'Ions' or 'Electrons'.") -class ITPA_density(Perturbation): - r"""ITPA radial density profile in `A. Könies et al. 2018 `_ - - .. math:: - - n(\eta_1) = n_0*c_3\exp\left[-\frac{c_2}{c_1}\tanh\left(\frac{\eta_1 - c_0}{c_2}\right)\right]\,. - """ - - def __init__( - self, - n0: float = 0.00720655, - c: tuple = (0.491230, 0.298228, 0.198739, 0.521298), - given_in_basis: GivenInBasis = "0", - comp: int = 0, - ): - """ - Parameters - ---------- - n0 : float - ITPA profile density - - c : tuple | list - 4 ITPA profile coefficients - """ - - assert len(c) == 4 - - self._n0 = n0 - self._c = c - - # use the setters - self.given_in_basis = "physical" - self.comp = comp - - def __call__(self, eta1, eta2=None, eta3=None): - val = 0.0 - - if self._c[2] == 0.0: - val = self._c[3] - 0 * eta1 - else: - val = ( - self._n0 - * self._c[3] - * xp.exp( - -self._c[2] / self._c[1] * xp.tanh((eta1 - self._c[0]) / self._c[2]), - ) - ) - - return val - - -class TokamakManufacturedSolutionVelocity(Perturbation): +class TokamakManufacturedSolutionVelocity: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -2063,16 +1893,7 @@ class TokamakManufacturedSolutionVelocity(Perturbation): in plasma physics, Journal of Computational Physics 2018. 
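The tokamak velocity classes below use the same cylindrical-to-Cartesian dispatch as the Restelli velocity above; a standalone sketch of that conversion, including the guard against division by zero on the axis:

from struphy.utils.arrays import xp as np

def cyl_to_cart(uR, uphi, x, y):
    R = np.sqrt(x**2 + y**2)
    R = np.where(R == 0.0, 1e-9, R)  # floor R to avoid dividing by zero on the axis
    phi = np.arctan2(-y, x)          # sign convention used by these classes
    ux = np.cos(phi) * uR - R * np.sin(phi) * uphi
    uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi
    return ux, uy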
""" - def __init__( - self, - comp=0, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): self._comp = comp self._a = a self._R0 = R0 @@ -2081,16 +1902,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) A = self._alpha / (self._a * self._R0) C = self._beta * self._Bp * self._R0 / (self._B0 * self._a) @@ -2100,20 +1917,20 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") -class TokamakManufacturedSolutionVelocity_1(Perturbation): +class TokamakManufacturedSolutionVelocity_1: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -2167,16 +1984,7 @@ class TokamakManufacturedSolutionVelocity_1(Perturbation): in plasma physics, Journal of Computational Physics 2018. """ - def __init__( - self, - comp=0, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): self._comp = comp self._a = a self._R0 = R0 @@ -2185,16 +1993,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) A = self._alpha / (self._a * self._R0) C = self._beta * self._Bp * self._R0 / (self._B0 * self._a) @@ -2204,20 +2008,20 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: raise ValueError(f"Invalid component '{self._comp}'. Must be '0', '1', or '2'.") -class TokamakManufacturedSolutionVelocity_2(Perturbation): +class TokamakManufacturedSolutionVelocity_2: r"""Analytic solution :math:`u=u_e` of the system: .. math:: @@ -2271,16 +2075,7 @@ class TokamakManufacturedSolutionVelocity_2(Perturbation): in plasma physics, Journal of Computational Physics 2018. 
""" - def __init__( - self, - comp=0, - a=1.0, - R0=2.0, - B0=10.0, - Bp=12.5, - alpha=0.1, - beta=1.0, - ): + def __init__(self, comp="0", a=1.0, R0=2.0, B0=10.0, Bp=12.5, alpha=0.1, beta=1.0): self._comp = comp self._a = a self._R0 = R0 @@ -2289,16 +2084,12 @@ def __init__( self._alpha = alpha self._beta = beta - # use the setters - self.given_in_basis = "physical" - self.comp = comp - # equilibrium ion velocity def __call__(self, x, y, z): """Velocity of ions and electrons.""" - R = xp.sqrt(x**2 + y**2) - R = xp.where(R == 0.0, 1e-9, R) - phi = xp.arctan2(-y, x) + R = np.sqrt(x**2 + y**2) + R = np.where(R == 0.0, 1e-9, R) + phi = np.arctan2(-y, x) A = self._alpha / (self._a * self._R0) C = self._beta * self._Bp * self._R0 / (self._B0 * self._a) @@ -2308,13 +2099,13 @@ def __call__(self, x, y, z): # from cylindrical to cartesian: - if self.comp == 0: - ux = xp.cos(phi) * uR - R * xp.sin(phi) * uphi + if self._comp == "0": + ux = np.cos(phi) * uR - R * np.sin(phi) * uphi return ux - elif self.comp == 1: - uy = -xp.sin(phi) * uR - R * xp.cos(phi) * uphi + elif self._comp == "1": + uy = -np.sin(phi) * uR - R * np.cos(phi) * uphi return uy - elif self.comp == 2: + elif self._comp == "2": uz = uZ return uz else: diff --git a/src/struphy/initial/tests/test_init_perturbations.py b/src/struphy/initial/tests/test_init_perturbations.py index 5d52e9291..c59565058 100644 --- a/src/struphy/initial/tests/test_init_perturbations.py +++ b/src/struphy/initial/tests/test_init_perturbations.py @@ -1,5 +1,4 @@ import inspect -from copy import deepcopy import pytest @@ -20,7 +19,6 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False): """Test the initialization Field.initialize_coeffs with all "Modes" classes in perturbations.py.""" - import cunumpy as xp from matplotlib import pyplot as plt from psydac.ddm.mpi import mpi as MPI @@ -28,8 +26,7 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False from struphy.geometry import domains from struphy.geometry.base import Domain from struphy.initial import perturbations - from struphy.initial.base import Perturbation - from struphy.models.variables import FEECVariable + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -50,10 +47,10 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False form_vector = ["1", "2", "v", "norm", "physical_at_eta"] # evaluation points - e1 = xp.linspace(0.0, 1.0, 30) - e2 = xp.linspace(0.0, 1.0, 40) - e3 = xp.linspace(0.0, 1.0, 50) - eee1, eee2, eee3 = xp.meshgrid(e1, e2, e3, indexing="ij") + e1 = np.linspace(0.0, 1.0, 30) + e2 = np.linspace(0.0, 1.0, 40) + e3 = np.linspace(0.0, 1.0, 50) + eee1, eee2, eee3 = np.meshgrid(e1, e2, e3, indexing="ij") # mode paramters kwargs = {} @@ -79,7 +76,7 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False form_vector += ["physical"] for key, val in inspect.getmembers(perturbations): - if inspect.isclass(val) and val.__module__ == perturbations.__name__: + if inspect.isclass(val): print(key, val) if key not in ("ModesCos", "ModesSin", "TorusModesCos", "TorusModesSin"): @@ -91,60 +88,63 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False ): continue - # instance of perturbation + # functions to compare to if "Torus" in key: - perturbation = val(**kwargs, pfuns=pfuns) + fun = val(**kwargs, pfuns=pfuns) else: - perturbation = val(**kwargs, ls=ls) + fun = val(**kwargs, ls=ls) if isinstance(domain, domains.Cuboid) or 
isinstance(domain, domains.Colella): - perturbation_xyz = val(**kwargs, ls=ls, Lx=Lx, Ly=Ly, Lz=Lz) - assert isinstance(perturbation, Perturbation) + fun_xyz = val(**kwargs, ls=ls, Lx=Lx, Ly=Ly, Lz=Lz) # single component is initialized - for space, form in derham.space_to_form.items(): + for space, name in derham.space_to_form.items(): if do_plot: - plt.figure(key + "_" + form + "-form_e1e2 " + mapping[0], figsize=(24, 16)) - plt.figure(key + "_" + form + "-form_e1e3 " + mapping[0], figsize=(24, 16)) + plt.figure(key + "_" + name + "-form_e1e2 " + mapping[0], figsize=(24, 16)) + plt.figure(key + "_" + name + "-form_e1e3 " + mapping[0], figsize=(24, 16)) - if form in ("0", "3"): + if name in ("0", "3"): for n, fun_form in enumerate(form_scalar): - if "Torus" in key and fun_form == "physical": - continue + params = {key: {"given_in_basis": fun_form}} - if "Modes" in key and fun_form == "physical": - perturbation._Lx = Lx - perturbation._Ly = Ly - perturbation._Lz = Lz + if "Modes" in key: + params[key]["ls"] = ls + params[key]["ms"] = kwargs["ms"] + params[key]["ns"] = kwargs["ns"] + params[key]["amps"] = kwargs["amps"] + if fun_form == "physical": + params[key]["Lx"] = Lx + params[key]["Ly"] = Ly + params[key]["Lz"] = Lz else: - perturbation._Lx = 1.0 - perturbation._Ly = 1.0 - perturbation._Lz = 1.0 - # use the setter - perturbation.given_in_basis = fun_form + raise ValueError(f'Perturbation {key} not implemented, only "Modes" are tested.') + + if "Torus" in key: + params[key].pop("ls") + if fun_form == "physical": + continue + params[key]["pfuns"] = pfuns - var = FEECVariable(space=space) - var.add_perturbation(perturbation) - var.allocate(derham, domain) - field = var.spline + field = derham.create_spline_function(name, space, pert_params=params) + field.initialize_coeffs(domain=domain) - field_vals_xyz = domain.push(field, e1, e2, e3, kind=form) + field_vals_xyz = domain.push(field, e1, e2, e3, kind=name) x, y, z = domain(e1, e2, e3) - r = xp.sqrt(x**2 + y**2) + r = np.sqrt(x**2 + y**2) if fun_form == "physical": - fun_vals_xyz = perturbation_xyz(x, y, z) + fun_vals_xyz = fun_xyz(x, y, z) elif fun_form == "physical_at_eta": - fun_vals_xyz = perturbation(eee1, eee2, eee3) + fun_vals_xyz = fun(eee1, eee2, eee3) else: - fun_vals_xyz = domain.push(perturbation, eee1, eee2, eee3, kind=fun_form) + fun_vals_xyz = domain.push(fun, eee1, eee2, eee3, kind=fun_form) - error = xp.max(xp.abs(field_vals_xyz - fun_vals_xyz)) / xp.max(xp.abs(fun_vals_xyz)) - print(f"{rank=}, {key=}, {form=}, {fun_form=}, {error=}") + error = np.max(np.abs(field_vals_xyz - fun_vals_xyz)) / np.max(np.abs(fun_vals_xyz)) + print(f"{rank=}, {key=}, {name=}, {fun_form=}, {error=}") assert error < 0.02 if do_plot: - plt.figure(key + "_" + form + "-form_e1e2 " + mapping[0]) + plt.figure(key + "_" + name + "-form_e1e2 " + mapping[0]) plt.subplot(2, 4, n + 1) if isinstance(domain, domains.HollowTorus): plt.contourf(r[:, :, 0], z[:, :, 0], field_vals_xyz[:, :, 0]) @@ -169,11 +169,11 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False plt.xlabel("x") plt.ylabel("y") plt.colorbar() - plt.title("exact function") + plt.title(f"exact function") ax = plt.gca() ax.set_aspect("equal", adjustable="box") - plt.figure(key + "_" + form + "-form_e1e3 " + mapping[0]) + plt.figure(key + "_" + name + "-form_e1e3 " + mapping[0]) plt.subplot(2, 4, n + 1) if isinstance(domain, domains.HollowTorus): plt.contourf(x[:, 0, :], y[:, 0, :], field_vals_xyz[:, 0, :]) @@ -198,31 +198,16 @@ def test_init_modes(Nel, p,
spl_kind, mapping, combine_comps=None, do_plot=False plt.xlabel("x") plt.ylabel("z") plt.colorbar() - plt.title("exact function") + plt.title(f"exact function") ax = plt.gca() ax.set_aspect("equal", adjustable="box") else: for n, fun_form in enumerate(form_vector): - if "Torus" in key and fun_form == "physical": - continue - - if "Modes" in key and fun_form == "physical": - perturbation._Lx = Lx - perturbation._Ly = Ly - perturbation._Lz = Lz - else: - perturbation._Lx = 1.0 - perturbation._Ly = 1.0 - perturbation._Lz = 1.0 - perturbation_0 = perturbation - perturbation_1 = deepcopy(perturbation) - perturbation_2 = deepcopy(perturbation) - params = { key: { "given_in_basis": [fun_form] * 3, - }, + } } if "Modes" in key: @@ -232,70 +217,59 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False else: raise ValueError(f'Perturbation {key} not implemented, only "Modes" are tested.') - if "Torus" not in key and isinstance(domain, domains.HollowTorus): - continue - - # use the setters - perturbation_0.given_in_basis = fun_form - perturbation_0.comp = 0 - perturbation_1.given_in_basis = fun_form - perturbation_1.comp = 1 - perturbation_2.given_in_basis = fun_form - perturbation_2.comp = 2 - - var = FEECVariable(space=space) - var.add_perturbation(perturbation_0) - var.add_perturbation(perturbation_1) - var.add_perturbation(perturbation_2) - var.allocate(derham, domain) - field = var.spline - - f1_xyz, f2_xyz, f3_xyz = domain.push(field, e1, e2, e3, kind=form) + if "Torus" in key: + # params[key].pop('ls') + if fun_form == "physical": + continue + params[key]["pfuns"] = [pfuns] * 3 + else: + params[key]["ls"] = [ls] * 3 + if fun_form == "physical": + params[key]["Lx"] = Lx + params[key]["Ly"] = Ly + params[key]["Lz"] = Lz + if isinstance(domain, domains.HollowTorus): + continue + + field = derham.create_spline_function(name, space, pert_params=params) + field.initialize_coeffs(domain=domain) + + f1_xyz, f2_xyz, f3_xyz = domain.push(field, e1, e2, e3, kind=name) f_xyz = [f1_xyz, f2_xyz, f3_xyz] x, y, z = domain(e1, e2, e3) - r = xp.sqrt(x**2 + y**2) + r = np.sqrt(x**2 + y**2) # exact values if fun_form == "physical": - fun1_xyz = perturbation_xyz(x, y, z) - fun2_xyz = perturbation_xyz(x, y, z) - fun3_xyz = perturbation_xyz(x, y, z) + fun1_xyz = fun_xyz(x, y, z) + fun2_xyz = fun_xyz(x, y, z) + fun3_xyz = fun_xyz(x, y, z) elif fun_form == "physical_at_eta": - fun1_xyz = perturbation(eee1, eee2, eee3) - fun2_xyz = perturbation(eee1, eee2, eee3) - fun3_xyz = perturbation(eee1, eee2, eee3) + fun1_xyz = fun(eee1, eee2, eee3) + fun2_xyz = fun(eee1, eee2, eee3) + fun3_xyz = fun(eee1, eee2, eee3) elif fun_form == "norm": tmp1, tmp2, tmp3 = domain.transform( - [perturbation, perturbation, perturbation], - eee1, - eee2, - eee3, - kind=fun_form + "_to_v", + [fun, fun, fun], eee1, eee2, eee3, kind=fun_form + "_to_v" ) fun1_xyz, fun2_xyz, fun3_xyz = domain.push([tmp1, tmp2, tmp3], eee1, eee2, eee3, kind="v") else: - fun1_xyz, fun2_xyz, fun3_xyz = domain.push( - [perturbation, perturbation, perturbation], - eee1, - eee2, - eee3, - kind=fun_form, - ) + fun1_xyz, fun2_xyz, fun3_xyz = domain.push([fun, fun, fun], eee1, eee2, eee3, kind=fun_form) fun_xyz_vec = [fun1_xyz, fun2_xyz, fun3_xyz] error = 0.0 for fi, funi in zip(f_xyz, fun_xyz_vec): - error += xp.max(xp.abs(fi - funi)) / xp.max(xp.abs(funi)) + error += np.max(np.abs(fi - funi)) / np.max(np.abs(funi)) error /= 3.0 - print(f"{rank=}, {key=}, {form=}, {fun_form=},
{error=}") assert error < 0.02 if do_plot: rn = len(form_vector) for c, (fi, f) in enumerate(zip(f_xyz, fun_xyz_vec)): - plt.figure(key + "_" + form + "-form_e1e2 " + mapping[0]) + plt.figure(key + "_" + name + "-form_e1e2 " + mapping[0]) plt.subplot(3, rn, rn * c + n + 1) if isinstance(domain, domains.HollowTorus): plt.contourf(r[:, :, 0], z[:, :, 0], fi[:, :, 0]) @@ -307,12 +281,12 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False plt.ylabel("y") plt.colorbar() plt.title( - f"component {c + 1}, init was {fun_form}, (m,n)=({kwargs['ms'][0]},{kwargs['ns'][0]})", + f"component {c + 1}, init was {fun_form}, (m,n)=({kwargs['ms'][0]},{kwargs['ns'][0]})" ) ax = plt.gca() ax.set_aspect("equal", adjustable="box") - plt.figure(key + "_" + form + "-form_e1e3 " + mapping[0]) + plt.figure(key + "_" + name + "-form_e1e3 " + mapping[0]) plt.subplot(3, rn, rn * c + n + 1) if isinstance(domain, domains.HollowTorus): plt.contourf(x[:, 0, :], y[:, 0, :], fi[:, 0, :]) @@ -324,7 +298,7 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False plt.ylabel("z") plt.colorbar() plt.title( - f"component {c + 1}, init was {fun_form}, (m,n)=({kwargs['ms'][0]},{kwargs['ns'][0]})", + f"component {c + 1}, init was {fun_form}, (m,n)=({kwargs['ms'][0]},{kwargs['ns'][0]})" ) ax = plt.gca() ax.set_aspect("equal", adjustable="box") @@ -334,8 +308,8 @@ def test_init_modes(Nel, p, spl_kind, mapping, combine_comps=None, do_plot=False if __name__ == "__main__": - # mapping = ['Colella', {'Lx': 4., 'Ly': 5., 'alpha': .07, 'Lz': 6.}] - mapping = ["HollowCylinder", {"a1": 0.1}] + mapping = ["Colella", {"Lx": 4.0, "Ly": 5.0, "alpha": 0.07, "Lz": 6.0}] + # mapping = ['HollowCylinder', {'a1': 0.1}] # mapping = ['Cuboid', {'l1': 0., 'r1': 4., 'l2': 0., 'r2': 5., 'l3': 0., 'r3': 6.}] test_init_modes([16, 16, 16], [2, 3, 4], [False, True, True], mapping, combine_comps=None, do_plot=False) # mapping = ["HollowTorus", {"tor_period": 1}] diff --git a/src/struphy/initial/utilities.py b/src/struphy/initial/utilities.py index 7af042249..da5edd45a 100644 --- a/src/struphy/initial/utilities.py +++ b/src/struphy/initial/utilities.py @@ -1,10 +1,10 @@ import os -import cunumpy as xp import h5py from struphy.fields_background.equils import set_defaults from struphy.io.output_handling import DataContainer +from struphy.utils.arrays import xp as np class InitFromOutput: @@ -98,6 +98,6 @@ def __init__( self._amp = amp def __call__(self, x, y, z): - val = self._amp * xp.random.rand(*x.shape).squeeze() + val = self._amp * np.random.rand(*x.shape).squeeze() return val diff --git a/src/struphy/io/output_handling.py b/src/struphy/io/output_handling.py index 808c2bcf3..79e23692b 100644 --- a/src/struphy/io/output_handling.py +++ b/src/struphy/io/output_handling.py @@ -1,9 +1,10 @@ import ctypes import os -import cunumpy as xp import h5py +from struphy.utils.arrays import xp as np + class DataContainer: """ @@ -54,7 +55,7 @@ def __init__(self, path_out, file_name=None, comm=None): dataset_keys = [] self._file.visit( - lambda key: dataset_keys.append(key) if isinstance(self._file[key], h5py.Dataset) else None, + lambda key: dataset_keys.append(key) if isinstance(self._file[key], h5py.Dataset) else None ) for key in dataset_keys: @@ -82,11 +83,11 @@ def add_data(self, data_dict): Parameters ---------- data_dict : dict - Name-object pairs to save during time stepping, e.g. {key : val}. key must be a string and val must be a xp.array of fixed shape. 
Scalar values (floats) must therefore be passed as 1d arrays of size 1. + Name-object pairs to save during time stepping, e.g. {key : val}. key must be a string and val must be a np.array of fixed shape. Scalar values (floats) must therefore be passed as 1d arrays of size 1. """ for key, val in data_dict.items(): - assert isinstance(val, xp.ndarray) + assert isinstance(val, np.ndarray) # if dataset already exists, check for compatibility with given array if key in self._dset_dict: @@ -110,11 +111,7 @@ def add_data(self, data_dict): self._file[key][0] = val[0] else: self._file.create_dataset( - key, - (1,) + val.shape, - maxshape=(None,) + val.shape, - dtype=val.dtype, - chunks=True, + key, (1,) + val.shape, maxshape=(None,) + val.shape, dtype=val.dtype, chunks=True ) self._file[key][0] = val diff --git a/src/struphy/io/setup.py b/src/struphy/io/setup.py index 4ecd96f47..f7ec8dfbf 100644 --- a/src/struphy/io/setup.py +++ b/src/struphy/io/setup.py @@ -1,93 +1,200 @@ -import glob -import importlib.util -import os -import shutil -import sys -from types import ModuleType - -import cunumpy as xp +from dataclasses import dataclass + from psydac.ddm.mpi import mpi as MPI -from struphy.geometry.base import Domain -from struphy.io.options import DerhamOptions -from struphy.topology.grids import TensorProductGrid +from struphy.utils.arrays import xp as np +from struphy.utils.utils import dict_to_yaml -def import_parameters_py(params_path: str) -> ModuleType: - """Import a .py parameter file under the module name 'parameters' and return it.""" - assert ".py" in params_path - spec = importlib.util.spec_from_file_location("parameters", params_path) - params_in = importlib.util.module_from_spec(spec) - sys.modules["parameters"] = params_in - spec.loader.exec_module(params_in) - return params_in +def derive_units( + Z_bulk: int = None, + A_bulk: int = None, + x: float = 1.0, + B: float = 1.0, + n: float = 1.0, + kBT: float = None, + velocity_scale: str = "alfvén", +): + """Computes units used in Struphy model's :ref:`normalization`. + Input units from parameter file: -def setup_folders( - path_out: str, - restart: bool, - verbose: bool = False, -): + * Length (m) + * Magnetic field (T) + * Number density (10^20 1/m^3) + * Thermal energy (keV), optional + + Velocity unit is defined here: + + * Velocity (m/s) + + Derived units using mass and charge number of bulk species: + + * Time (s) + * Pressure (Pa) + * Mass density (kg/m^3) + * Current density (A/m^2) + + Parameters + --------- + Z_bulk : int + Charge number of bulk species. + + A_bulk : int + Mass number of bulk species. + + x : float + Unit of length (in meters). + + B : float + Unit of magnetic field (in Tesla). + + n : float + Unit of particle number density (in 1e20 per cubic meter). + + kBT : float + Unit of internal energy (in keV). Only in effect if the velocity scale is set to 'thermal'. + + velocity_scale : str + Velocity scale to be used ("alfvén", "cyclotron", "light" or "thermal"). + + Returns + ------- + units : dict + The Struphy units defined above and some Physics constants. """ - Setup output folders. 
+ + units = {} + + # physics constants + units["elementary charge"] = 1.602176634e-19 # elementary charge (C) + units["proton mass"] = 1.67262192369e-27 # proton mass (kg) + units["mu0"] = 1.25663706212e-6 # magnetic constant (N/A^2) + units["eps0"] = 8.8541878128e-12 # vacuum permittivity (F/m) + units["kB"] = 1.380649e-23 # Boltzmann constant (J/K) + units["speed of light"] = 299792458 # speed of light (m/s) + + e = units["elementary charge"] + mH = units["proton mass"] + mu0 = units["mu0"] + eps0 = units["eps0"] + kB = units["kB"] + c = units["speed of light"] + + # length (m) + units["x"] = x + # magnetic field (T) + units["B"] = B + # number density (1/m^3) + units["n"] = n * 1e20 + + # velocity (m/s) + if velocity_scale is None: + units["v"] = 1.0 + + elif velocity_scale == "light": + units["v"] = 1.0 * c + + elif velocity_scale == "alfvén": + assert A_bulk is not None, 'Need bulk species to choose velocity scale "alfvén".' + units["v"] = units["B"] / np.sqrt(units["n"] * A_bulk * mH * mu0) + + elif velocity_scale == "cyclotron": + assert Z_bulk is not None, 'Need bulk species to choose velocity scale "cyclotron".' + assert A_bulk is not None, 'Need bulk species to choose velocity scale "cyclotron".' + units["v"] = Z_bulk * e * units["B"] / (A_bulk * mH) * units["x"] + + elif velocity_scale == "thermal": + assert A_bulk is not None, 'Need bulk species to choose velocity scale "thermal".' + assert kBT is not None + units["v"] = np.sqrt(kBT * 1000 * e / (mH * A_bulk)) + + # time (s) + units["t"] = units["x"] / units["v"] + if A_bulk is None: + return units + + # pressure (Pa), equal to B^2/mu0 if velocity_scale='alfvén' + units["p"] = A_bulk * mH * units["n"] * units["v"] ** 2 + + # mass density (kg/m^3) + units["rho"] = A_bulk * mH * units["n"] + + # current density (A/m^2) + units["j"] = e * units["n"] * units["v"] + + return units + + +def setup_domain_and_equil(params: dict, units: dict = None): """ - if MPI.COMM_WORLD.Get_rank() == 0: - if verbose: - print("\nPREPARATION AND CLEAN-UP:") + Creates the domain object and equilibrium for a given parameter file. - # create output folder if it does not exit - if not os.path.exists(path_out): - os.makedirs(path_out, exist_ok=True) - if verbose: - print("Created folder " + path_out) + Parameters + ---------- + params : dict + The full simulation parameter dictionary. - # create data folder in output folder if it does not exist - if not os.path.exists(os.path.join(path_out, "data/")): - os.mkdir(os.path.join(path_out, "data/")) - if verbose: - print("Created folder " + os.path.join(path_out, "data/")) + units : dict + All Struphy units. + + Returns + ------- + domain : Domain + The Struphy domain object for evaluating the mapping F : [0, 1]^3 --> R^3 and the corresponding metric coefficients. + + equil : FluidEquilibrium + The equilibrium object. 
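A minimal sketch of the parameter layout consumed here; the Cuboid bounds are illustrative, and without a "fluid_background" block the returned equilibrium is None:

from struphy.io.setup import setup_domain_and_equil

params = {
    "geometry": {
        "type": "Cuboid",
        "Cuboid": {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0},
    },
}
domain, equil = setup_domain_and_equil(params)
assert equil is None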
+ """ + + from struphy.fields_background import equils + from struphy.fields_background.base import ( + NumericalFluidEquilibrium, + NumericalFluidEquilibriumWithB, + NumericalMHDequilibrium, + ) + from struphy.geometry import domains + + if "fluid_background" in params: + for eq_type, eq_params in params["fluid_background"].items(): + eq_class = getattr(equils, eq_type) + if eq_type in ("EQDSKequilibrium", "GVECequilibrium", "DESCequilibrium"): + equil = eq_class(**eq_params, units=units) + else: + equil = eq_class(**eq_params) + + # for numerical equilibria, the domain comes from the equilibrium + if isinstance(equil, (NumericalMHDequilibrium, NumericalFluidEquilibrium, NumericalFluidEquilibriumWithB)): + domain = equil.domain + # for all other equilibria, the domain can be chosen idependently else: - # remove post_processing folder - folder = os.path.join(path_out, "post_processing") - if os.path.exists(folder): - shutil.rmtree(folder) - if verbose: - print("Removed existing folder " + folder) + dom_type = params["geometry"]["type"] + dom_class = getattr(domains, dom_type) - # remove meta file - file = os.path.join(path_out, "meta.txt") - if os.path.exists(file): - os.remove(file) - if verbose: - print("Removed existing file " + file) + if dom_type == "Tokamak": + domain = dom_class(**params["geometry"][dom_type], equilibrium=equil) + else: + domain = dom_class(**params["geometry"][dom_type]) - # remove profiling file - file = os.path.join(path_out, "profile_tmp") - if os.path.exists(file): - os.remove(file) - if verbose: - print("Removed existing file " + file) + # set domain attribute in mhd object + equil.domain = domain - # remove .png files (if NOT a restart) - if not restart: - files = glob.glob(os.path.join(path_out, "*.png")) - for n, file in enumerate(files): - os.remove(file) - if verbose and n < 10: # print only ten statements in case of many processes - print("Removed existing file " + file) + # no equilibrium (just load domain) + else: + dom_type = params["geometry"]["type"] + dom_class = getattr(domains, dom_type) + domain = dom_class(**params["geometry"][dom_type]) - files = glob.glob(os.path.join(path_out, "data", "*.hdf5")) - for n, file in enumerate(files): - os.remove(file) - if verbose and n < 10: # print only ten statements in case of many processes - print("Removed existing file " + file) + equil = None + + return domain, equil def setup_derham( - grid: TensorProductGrid, - options: DerhamOptions, - comm: MPI.Intracomm = None, - domain: Domain = None, + params_grid, + comm=None, + domain=None, + mpi_dims_mask=None, verbose=False, ): """ @@ -95,15 +202,19 @@ def setup_derham( Parameters ---------- - grid : TensorProductGrid - The FEEC grid. + params_grid : dict + Grid parameters dictionary. comm: Intracomm MPI communicator (sub_comm if clones are used). - domain : Domain, optional + domain : struphy.geometry.base.Domain, optional The Struphy domain object for evaluating the mapping F : [0, 1]^3 --> R^3 and the corresponding metric coefficients. + mpi_dims_mask: list of bool + True if the dimension is to be used in the domain decomposition (=default for each dimension). + If mpi_dims_mask[i]=False, the i-th dimension will not be decomposed. + verbose : bool Show info on screen. @@ -116,31 +227,28 @@ def setup_derham( from struphy.feec.psydac_derham import Derham # number of grid cells - Nel = grid.Nel - # mpi - mpi_dims_mask = grid.mpi_dims_mask - + Nel = params_grid["Nel"] # spline degrees - p = options.p + p = params_grid["p"] # spline types (clamped vs. 
periodic) - spl_kind = options.spl_kind + spl_kind = params_grid["spl_kind"] # boundary conditions (Homogeneous Dirichlet or None) - dirichlet_bc = options.dirichlet_bc + dirichlet_bc = params_grid["dirichlet_bc"] # Number of quadrature points per histopolation cell - nq_pr = options.nq_pr + nq_pr = params_grid["nq_pr"] # Number of quadrature points per grid cell for L^2 - nquads = options.nquads + nq_el = params_grid["nq_el"] # C^k smoothness at eta_1=0 for polar domains - polar_ck = options.polar_ck + polar_ck = params_grid["polar_ck"] # local commuting projectors - local_projectors = options.local_projectors + local_projectors = params_grid["local_projectors"] derham = Derham( Nel, p, spl_kind, dirichlet_bc=dirichlet_bc, - nquads=nquads, + nquads=nq_el, nq_pr=nq_pr, comm=comm, mpi_dims_mask=mpi_dims_mask, @@ -152,12 +260,12 @@ def setup_derham( if MPI.COMM_WORLD.Get_rank() == 0 and verbose: print("\nDERHAM:") - print("number of elements:".ljust(25), Nel) - print("spline degrees:".ljust(25), p) - print("periodic bcs:".ljust(25), spl_kind) - print("hom. Dirichlet bc:".ljust(25), dirichlet_bc) - print("GL quad pts (L2):".ljust(25), nquads) - print("GL quad pts (hist):".ljust(25), nq_pr) + print(f"number of elements:".ljust(25), Nel) + print(f"spline degrees:".ljust(25), p) + print(f"periodic bcs:".ljust(25), spl_kind) + print(f"hom. Dirichlet bc:".ljust(25), dirichlet_bc) + print(f"GL quad pts (L2):".ljust(25), nq_el) + print(f"GL quad pts (hist):".ljust(25), nq_pr) print( "MPI proc. per dir.:".ljust(25), derham.domain_decomposition.nprocs, @@ -168,6 +276,203 @@ def setup_derham( return derham +def pre_processing( + model_name: str, + parameters: dict | str, + path_out: str, + restart: bool, + max_sim_time: int, + save_step: int, + mpi_rank: int, + mpi_size: int, + use_mpi: bool, + num_clones: int, + verbose: bool = False, +): + """ + Prepares simulation parameters, output folder and prints some information of the run to the screen. + + Parameters + ---------- + model_name : str + The name of the model to run. + + parameters : dict | str + The simulation parameters. Can either be a dictionary OR a string (path of .yml parameter file) + + path_out : str + The output directory. Will create a folder if it does not exist OR cleans the folder for new runs. + + restart : bool + Whether to restart a run. + + max_sim_time : int + Maximum run time of simulation in minutes. Will finish the time integration once this limit is reached. + + save_step : int + When to save data output: every time step (save_step=1), every second time step (save_step=2). + + mpi_rank : int + The rank of the calling process. + + mpi_size : int + Total number of MPI processes of the run. + + use_mpi: bool + True if MPI.COMM_WORLD is not None. + + num_clones: int + Number of domain clones. + + verbose : bool + Show full screen output. + + Returns + ------- + params : dict + The simulation parameters. 
+ """ + + import datetime + import glob + import os + import shutil + import sysconfig + + import yaml + + from struphy.models import fluid, hybrid, kinetic, toy + + # prepare output folder + if mpi_rank == 0: + if verbose: + print("\nPREPARATION AND CLEAN-UP:") + + # create output folder if it does not exit + if not os.path.exists(path_out): + os.makedirs(path_out, exist_ok=True) + if verbose: + print("Created folder " + path_out) + + # create data folder in output folder if it does not exist + if not os.path.exists(os.path.join(path_out, "data/")): + os.mkdir(os.path.join(path_out, "data/")) + if verbose: + print("Created folder " + os.path.join(path_out, "data/")) + + # clean output folder if it already exists + else: + # remove post_processing folder + folder = os.path.join(path_out, "post_processing") + if os.path.exists(folder): + shutil.rmtree(folder) + if verbose: + print("Removed existing folder " + folder) + + # remove meta file + file = os.path.join(path_out, "meta.txt") + if os.path.exists(file): + os.remove(file) + if verbose: + print("Removed existing file " + file) + + # remove profiling file + file = os.path.join(path_out, "profile_tmp") + if os.path.exists(file): + os.remove(file) + if verbose: + print("Removed existing file " + file) + + # remove .png files (if NOT a restart) + if not restart: + files = glob.glob(os.path.join(path_out, "*.png")) + for n, file in enumerate(files): + os.remove(file) + if verbose and n < 10: # print only ten statements in case of many processes + print("Removed existing file " + file) + + files = glob.glob(os.path.join(path_out, "data", "*.hdf5")) + for n, file in enumerate(files): + os.remove(file) + if verbose and n < 10: # print only ten statements in case of many processes + print("Removed existing file " + file) + + # save "parameters" dictionary as .yml file + if isinstance(parameters, dict): + parameters_path = os.path.join(path_out, "parameters.yml") + + # write parameters to file and save it in output folder + if mpi_rank == 0: + dict_to_yaml(parameters, parameters_path) + + params = parameters + + # OR load parameters if "parameters" is a string (path) + else: + parameters_path = parameters + + with open(parameters) as file: + params = yaml.load(file, Loader=yaml.FullLoader) + + if model_name is None: + assert "model" in params, "If model is not specified, then model: MODEL must be specified in the params!" 
+ model_name = params["model"] + + if mpi_rank == 0: + # copy parameter file to output folder + if parameters_path != os.path.join(path_out, "parameters.yml"): + shutil.copy2( + parameters_path, + os.path.join( + path_out, + "parameters.yml", + ), + ) + + # print simulation info + print("\nMETADATA:") + print("platform:".ljust(25), sysconfig.get_platform()) + print("python version:".ljust(25), sysconfig.get_python_version()) + print("model:".ljust(25), model_name) + print("MPI processes:".ljust(25), mpi_size) + print("use MPI.COMM_WORLD:".ljust(25), use_mpi) + print("number of domain clones:".ljust(25), num_clones) + print("parameter file:".ljust(25), parameters_path) + print("output folder:".ljust(25), path_out) + print("restart:".ljust(25), restart) + print("max wall-clock [min]:".ljust(25), max_sim_time) + print("save interval [steps]:".ljust(25), save_step) + + # write meta data to output folder + with open(path_out + "/meta.txt", "w") as f: + f.write( + "date of simulation: ".ljust( + 30, + ) + + str(datetime.datetime.now()) + + "\n", + ) + f.write("platform: ".ljust(30) + sysconfig.get_platform() + "\n") + f.write( + "python version: ".ljust( + 30, + ) + + sysconfig.get_python_version() + + "\n", + ) + f.write("model_name: ".ljust(30) + model_name + "\n") + f.write("processes: ".ljust(30) + str(mpi_size) + "\n") + f.write("use MPI.COMM_WORLD: ".ljust(30) + str(use_mpi) + "\n") + f.write("output folder:".ljust(30) + path_out + "\n") + f.write("restart:".ljust(30) + str(restart) + "\n") + f.write( + "max wall-clock time [min]:".ljust(30) + str(max_sim_time) + "\n", + ) + f.write("save interval (steps):".ljust(30) + str(save_step) + "\n") + + return params + + def descend_options_dict( d: dict, out: list | dict, @@ -233,35 +538,35 @@ def descend_options_dict( out = copy.deepcopy(d) if verbose: - print(f"{d =}") - print(f"{out =}") - print(f"{d_default =}") - print(f"{d_opts =}") - print(f"{keys =}") - print(f"{depth =}") - print(f"{pop_again =}") + print(f"{d = }") + print(f"{out = }") + print(f"{d_default = }") + print(f"{d_opts = }") + print(f"{keys = }") + print(f"{depth = }") + print(f"{pop_again = }") if verbose: - print(f"{d =}") - print(f"{out =}") - print(f"{d_default =}") - print(f"{d_opts =}") - print(f"{keys =}") - print(f"{depth =}") - print(f"{pop_again =}") + print(f"{d = }") + print(f"{out = }") + print(f"{d_default = }") + print(f"{d_opts = }") + print(f"{keys = }") + print(f"{depth = }") + print(f"{pop_again = }") count = 0 for key, val in d.items(): count += 1 if verbose: - print(f"\n{keys =} | {key =}, {type(val) =}, {count =}\n") + print(f"\n{keys = } | {key = }, {type(val) = }, {count = }\n") if isinstance(val, list): # create default parameter dict "out" if verbose: - print(f"{val =}") + print(f"{val = }") if d_default is None: if len(keys) == 0: @@ -299,10 +604,10 @@ def descend_options_dict( out += [out_sublist] if verbose: - print(f"{out =}") + print(f"{out = }") if verbose: - print(f"{out =}") + print(f"{out = }") # recurse if necessary elif isinstance(val, dict): diff --git a/src/struphy/kinetic_background/base.py b/src/struphy/kinetic_background/base.py index 765ee1508..a694a0b1e 100644 --- a/src/struphy/kinetic_background/base.py +++ b/src/struphy/kinetic_background/base.py @@ -1,12 +1,14 @@ "Base classes for kinetic backgrounds." 
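The arithmetic overloads in this module let backgrounds be combined into multi-population distributions; an equal-weight two-population sketch (it assumes Maxwellian3D from the sibling module and that __add__ pairs with SumKineticBackground, as the class names suggest):

from struphy.kinetic_background.maxwellians import Maxwellian3D

bulk = Maxwellian3D(maxw_params={"n": 1.0})
beam = Maxwellian3D(maxw_params={"n": 1.0, "u3": 2.0})
f0 = 0.5 * bulk + 0.5 * beam  # ScalarMultiplyKineticBackground, then SumKineticBackground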
+import copy from abc import ABCMeta, abstractmethod -from typing import Callable -import cunumpy as xp - -from struphy.fields_background.base import FluidEquilibriumWithB -from struphy.initial.base import Perturbation +from struphy.fields_background.base import FluidEquilibrium +from struphy.fields_background.equils import set_defaults +from struphy.initial import perturbations +from struphy.initial.utilities import Noise +from struphy.kinetic_background import moment_functions +from struphy.utils.arrays import xp as np class KineticBackground(metaclass=ABCMeta): @@ -44,8 +46,8 @@ def is_polar(self): @property @abstractmethod - def volume_form(self) -> bool: - """True if the background is represented as a volume form (thus including the velocity Jacobian).""" + def volume_form(self): + """Boolean. True if the background is represented as a volume form (thus including the velocity Jacobian).""" pass @abstractmethod @@ -104,7 +106,7 @@ def __call__(self, *args): Returns ------- - f0 : xp.ndarray + f0 : np.ndarray The evaluated background. """ pass @@ -119,12 +121,12 @@ def __rmul__(self, a): return ScalarMultiplyKineticBackground(self, a) def __div__(self, a): - assert isinstance(a, float) or isinstance(a, int) or isinstance(a, xp.int64) + assert isinstance(a, float) or isinstance(a, int) or isinstance(a, np.int64) assert a != 0, "Cannot divide by zero!" return ScalarMultiplyKineticBackground(self, 1 / a) def __rdiv__(self, a): - assert isinstance(a, float) or isinstance(a, int) or isinstance(a, xp.int64) + assert isinstance(a, float) or isinstance(a, int) or isinstance(a, np.int64) assert a != 0, "Cannot divide by zero!" return ScalarMultiplyKineticBackground(self, 1 / a) @@ -143,10 +145,6 @@ def __init__(self, f1, f2): self._f1 = f1 self._f2 = f2 - if hasattr(f1, "_equil"): - assert f1.equil is f2.equil - self._equil = f1.equil - @property def coords(self): """Coordinates of the distribution.""" @@ -167,13 +165,6 @@ def volume_form(self): """Boolean. True if the background is represented as a volume form (thus including the velocity Jacobian).""" return self._f1.volume_form - @property - def equil(self) -> FluidEquilibriumWithB: - """Fluid background with B-field.""" - if not hasattr(self, "_equil"): - self._equil = None - return self._equil - def velocity_jacobian_det(self, eta1, eta2, eta3, *v): """Jacobian determinant of the velocity coordinate transformation.""" return self._f1.velocity_jacobian_det(eta1, eta2, eta3, *v) @@ -207,10 +198,8 @@ def u(self, *etas): n1 = self._f1.n(*etas) n2 = self._f2.n(*etas) - u1s = self._f1.u(*etas) - u2s = self._f2.u(*etas) - return [(n1 * u1 + n2 * u2) / (n1 + n2) for u1, u2 in zip(u1s, u2s)] + return [(n1 * u1 + n2 * u2) / (n1 + n2) for u1, u2 in zip(self._f1.u(*etas), self._f2.u(*etas))] def __call__(self, *args): """Evaluates the background distribution function f0(etas, v1, ..., vn). @@ -232,7 +221,7 @@ def __call__(self, *args): Returns ------- - f0 : xp.ndarray + f0 : np.ndarray The evaluated background. """ return self._f1(*args) + self._f2(*args) @@ -241,7 +230,7 @@ def __call__(self, *args): class ScalarMultiplyKineticBackground(KineticBackground): def __init__(self, f0, a): assert isinstance(f0, KineticBackground) - assert isinstance(a, float) or isinstance(a, int) or isinstance(a, xp.int64) + assert isinstance(a, float) or isinstance(a, int) or isinstance(a, np.int64) self._f = f0 self._a = a @@ -318,7 +307,7 @@ def __call__(self, *args): Returns ------- - f0 : xp.ndarray + f0 : np.ndarray The evaluated background. 
""" return self._a * self._f(*args) @@ -339,6 +328,40 @@ class Maxwellian(KineticBackground): and the thermal velocities :math:`v_{\mathrm{th},i}(\boldsymbol{\eta})`. """ + def __init__( + self, + maxw_params: dict = None, + pert_params: dict = None, + equil: FluidEquilibrium = None, + ): + # Set background parameters + if maxw_params is None: + maxw_params = {} + assert isinstance(maxw_params, dict) + self._maxw_params = set_defaults( + maxw_params, + self.default_maxw_params(), + ) + + # check if fluid background is needed + for key, val in self.maxw_params.items(): + if val == "fluid_background": + assert equil is not None + + # parameters for perturbation + if pert_params is None: + pert_params = {} + assert isinstance(pert_params, dict) + self._pert_params = pert_params + + # Fluid equilibrium + self._equil = equil + + @classmethod + def default_maxw_params(cls): + """Default parameters dictionary defining constant moments of the Maxwellian.""" + pass + @abstractmethod def vth(self, *etas): """Thermal velocities (0-forms). @@ -355,18 +378,21 @@ def vth(self, *etas): pass @property - @abstractmethod - def maxw_params(self) -> dict: - """Parameters dictionary defining moments of the Maxwellian.""" + def maxw_params(self): + """Parameters dictionary defining constant moments of the Maxwellian.""" + return self._maxw_params - def check_maxw_params(self): - for k, v in self.maxw_params.items(): - assert isinstance(k, str) - assert isinstance(v, tuple), f"Maxwallian parameter {k} must be tuple, but is {v}" - assert len(v) == 2 + @property + def pert_params(self): + """Parameters dictionary defining the perturbations.""" + return self._pert_params - assert isinstance(v[0], (float, int, Callable)) - assert isinstance(v[1], Perturbation) or v[1] is None + @property + def equil(self): + """One of :mod:`~struphy.fields_background.equils` + in case that moments are to be set in that way, None otherwise. + """ + return self._equil @classmethod def gaussian(self, v, u=0.0, vth=1.0, polar=False, volume_form=False): @@ -394,14 +420,14 @@ def gaussian(self, v, u=0.0, vth=1.0, polar=False, volume_form=False): An array of size(v). """ - if isinstance(v, xp.ndarray) and isinstance(u, xp.ndarray): - assert v.shape == u.shape, f"{v.shape =} but {u.shape =}" + if isinstance(v, np.ndarray) and isinstance(u, np.ndarray): + assert v.shape == u.shape, f"{v.shape = } but {u.shape = }" if not polar: - out = 1.0 / vth * 1.0 / xp.sqrt(2.0 * xp.pi) * xp.exp(-((v - u) ** 2) / (2.0 * vth**2)) + out = 1.0 / vth * 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-((v - u) ** 2) / (2.0 * vth**2)) else: - assert xp.all(v >= 0.0) - out = 1.0 / vth**2 * xp.exp(-((v - u) ** 2) / (2.0 * vth**2)) + assert np.all(v >= 0.0) + out = 1.0 / vth**2 * np.exp(-((v - u) ** 2) / (2.0 * vth**2)) if volume_form: out *= v @@ -427,16 +453,16 @@ def __call__(self, *args): Returns ------- - f : xp.ndarray + f : np.ndarray The evaluated Maxwellian. """ # Check that all args have the same shape - shape0 = xp.shape(args[0]) + shape0 = np.shape(args[0]) for i, arg in enumerate(args): - assert xp.shape(arg) == shape0, f"Argument {i} has {xp.shape(arg) =}, but must be {shape0 =}." - assert xp.ndim(arg) == 1 or xp.ndim(arg) == 3 + self.vdim, ( - f"{xp.ndim(arg) =} not allowed for Maxwellian evaluation." + assert np.shape(arg) == shape0, f"Argument {i} has {np.shape(arg) = }, but must be {shape0 = }." + assert np.ndim(arg) == 1 or np.ndim(arg) == 3 + self.vdim, ( + f"{np.ndim(arg) = } not allowed for Maxwellian evaluation." 
) # flat or meshgrid evaluation # Get result evaluated at eta's @@ -445,33 +471,33 @@ def __call__(self, *args): vths = self.vth(*args[: -self.vdim]) # take care of correct broadcasting, assuming args come from phase space meshgrid - if xp.ndim(args[0]) > 3: + if np.ndim(args[0]) > 3: # move eta axes to the back - arg_t = xp.moveaxis(args[0], 0, -1) - arg_t = xp.moveaxis(arg_t, 0, -1) - arg_t = xp.moveaxis(arg_t, 0, -1) + arg_t = np.moveaxis(args[0], 0, -1) + arg_t = np.moveaxis(arg_t, 0, -1) + arg_t = np.moveaxis(arg_t, 0, -1) # broadcast res_broad = res + 0.0 * arg_t # move eta axes to the front - res = xp.moveaxis(res_broad, -1, 0) - res = xp.moveaxis(res, -1, 0) - res = xp.moveaxis(res, -1, 0) + res = np.moveaxis(res_broad, -1, 0) + res = np.moveaxis(res, -1, 0) + res = np.moveaxis(res, -1, 0) # Multiply result with gaussian in v's for i, v in enumerate(args[-self.vdim :]): # correct broadcasting - if xp.ndim(args[0]) > 3: + if np.ndim(args[0]) > 3: u_broad = us[i] + 0.0 * arg_t - u = xp.moveaxis(u_broad, -1, 0) - u = xp.moveaxis(u, -1, 0) - u = xp.moveaxis(u, -1, 0) + u = np.moveaxis(u_broad, -1, 0) + u = np.moveaxis(u, -1, 0) + u = np.moveaxis(u, -1, 0) vth_broad = vths[i] + 0.0 * arg_t - vth = xp.moveaxis(vth_broad, -1, 0) - vth = xp.moveaxis(vth, -1, 0) - vth = xp.moveaxis(vth, -1, 0) + vth = np.moveaxis(vth_broad, -1, 0) + vth = np.moveaxis(vth, -1, 0) + vth = np.moveaxis(vth, -1, 0) else: u = us[i] vth = vths[i] @@ -480,7 +506,7 @@ def __call__(self, *args): return res - def _evaluate_moment(self, eta1, eta2, eta3, *, name: str = "n", add_perturbation: bool = None): + def _evaluate_moment(self, eta1, eta2, eta3, *, name="n"): """Scalar moment evaluation as background + perturbation. Parameters @@ -491,28 +517,21 @@ def _evaluate_moment(self, eta1, eta2, eta3, *, name: str = "n", add_perturbatio name : str Which moment to evaluate (see varaible "dct" below). - add_perturbation : bool | None - Whether to add the perturbation defined in maxw_params. If None, is taken from self.add_perturbation. - Returns ------- A float (background value) or a numpy.array of the evaluated scalar moment. """ # collect arguments - assert isinstance(eta1, xp.ndarray) - assert isinstance(eta2, xp.ndarray) - assert isinstance(eta3, xp.ndarray) + assert isinstance(eta1, np.ndarray) + assert isinstance(eta2, np.ndarray) + assert isinstance(eta3, np.ndarray) assert eta1.shape == eta2.shape == eta3.shape - params = self.maxw_params[name] - assert isinstance(params, tuple) - assert len(params) == 2 - # flat evaluation for markers if eta1.ndim == 1: etas = [ - xp.concatenate( + np.concatenate( (eta1[:, None], eta2[:, None], eta3[:, None]), axis=1, ), @@ -545,38 +564,232 @@ def _evaluate_moment(self, eta1, eta2, eta3, *, name: str = "n", add_perturbatio else: out = 0.0 * etas[0] - # evaluate background - background = params[0] - if isinstance(background, (float, int)): - out += background + # correspondence name -> equilibrium attribute + dct = { + "n": "n0", + "u1": "u_cart_1", + "u2": "u_cart_2", + "u3": "u_cart_3", + "vth1": "vth0", + "vth2": "vth0", + "vth3": "vth0", + "u_para": "u_para0", + "u_perp": None, + "vth_para": "vth0", + "vth_perp": "vth0", + } + + # fluid background + if self.maxw_params[name] == "fluid_background": + if dct[name] is not None: + out += getattr(self.equil, dct[name])(*etas) + if name in ("n") or "vth" in name: + assert np.all(out > 0.0), f"{name} must be positive!" 
+ else: + print(f'Moment evaluation with "fluid_background" not implemented for {name}.') + + # when using moment functions, see test https://gitlab.mpcdf.mpg.de/struphy/struphy/-/blob/devel/src/struphy/kinetic_background/tests/test_maxwellians.py?ref_type=heads#L1760 + elif isinstance(self.maxw_params[name], dict): + mom_funcs = copy.deepcopy(self.maxw_params[name]) + for typ, params in mom_funcs.items(): + assert params["given_in_basis"] == "0", "Moment functions must be passed as 0-forms to Maxwellians." + params.pop("given_in_basis") + nfun = getattr(moment_functions, typ)(**params) + if eta1.ndim == 1: + out += nfun(eta1, eta2, eta3) + else: + out += nfun(*etas) + + # constant background else: - assert callable(background) - # if eta1.ndim == 1: - # out += background(eta1, eta2, eta3) - # else: - out += background(*etas) - - # add perturbation - if add_perturbation is None: - add_perturbation = self.add_perturbation - - perturbation = params[1] - if perturbation is not None and add_perturbation: - assert isinstance(perturbation, Perturbation) if eta1.ndim == 1: - out += perturbation(eta1, eta2, eta3) + out += self.maxw_params[name] else: - out += perturbation(*etas) + out += self.maxw_params[name] + + # add possible perturbations + if name in self.pert_params: + pp_copy = copy.deepcopy(self.pert_params) + for pert, params in pp_copy[name].items(): + if pert == "Noise": + noise = Noise(**params) + if eta1.ndim == 1: + out += noise(eta1, eta2, eta3) + else: + out += noise(*etas) + else: + assert params["given_in_basis"] == "0", ( + "Moment perturbations must be passed as 0-forms to Maxwellians." + ) + params.pop("given_in_basis") + + perturbation = getattr(perturbations, pert)( + **params, + ) + + if eta1.ndim == 1: + out += perturbation(eta1, eta2, eta3) + else: + out += perturbation(*etas) return out + +class CanonicalMaxwellian(metaclass=ABCMeta): + r"""Base class for a canonical Maxwellian distribution function. + It is defined by three constants of motion in the axisymmetric toroidal system: + + - Shifted canonical toroidal momentum + + .. math:: + + \psi_c = \psi + \frac{m_s F}{q_s B}v_\parallel - \text{sign}(v_\parallel)\sqrt{2(\epsilon - \mu B)}\frac{m_sF}{q_sB} \mathcal{H}(\epsilon - \mu B), + + - Energy + + .. math:: + + \epsilon = \frac{1}{2}m_s v_\parallel^2 + \mu B, + + - Magnetic moment + + .. math:: + + \mu = \frac{m_s v_\perp^2}{2B}, + + where :math:`\psi` is the poloidal magnetic flux function, :math:`F=F(\psi)` is the poloidal current function and :math:`\mathcal{H}` is the Heaviside function. + + With the three constants of motion, a canonical Maxwellian distribution function is defined as + + .. math:: + + F(\psi_c, \epsilon, \mu) = \frac{n(\psi_c)}{(2\pi)^{3/2}v_\text{th}^3(\psi_c)} \text{exp}\left[ - \frac{\epsilon}{v_\text{th}^2(\psi_c)}\right]. + + """ + + @property + @abstractmethod + def coords(self): + """Coordinates of the distribution.""" + pass + + @abstractmethod + def velocity_jacobian_det(self, eta1, eta2, eta3, *v): + """Jacobian determinant of the velocity coordinate transformation.""" + pass + + @abstractmethod + def n(self, psic): + """Number density (0-form). + + Parameters + ---------- + psic : numpy.arrays + Shifted canonical toroidal momentum.
+ + Returns + ------- + A numpy.array with the density evaluated at evaluation points (same shape as etas). + """ + pass + + @abstractmethod + def vth(self, psic): + """Thermal velocities (0-forms). + + Parameters + ---------- + psic : numpy.arrays + Shifted canonical toroidal momentum. + + Returns + ------- + A numpy.array with the thermal velocity evaluated at evaluation points (one dimension more than etas). + The additional dimension is in the first index. + """ + pass + + def gaussian(self, e, vth=1.0): + """3-dim. normal distribution, to which array-valued thermal velocities can be passed. + + Parameters + ---------- + e : float | array-like + Energy. + + vth : float | array-like + Thermal velocity evaluated at psic. + + Returns + ------- + An array of size(e). + """ + + if isinstance(vth, np.ndarray): + assert e.shape == vth.shape, f"{e.shape = } but {vth.shape = }" + + return 2.0 * np.sqrt(e / np.pi) / vth**3 * np.exp(-e / vth**2) + + def __call__(self, *args): + """Evaluates the canonical Maxwellian distribution function. + + There are two use-cases for this function in the code: + + 1. Evaluating for particles ("flat evaluation", inputs are all 1D of length N_p) + 2. Evaluating the function on a meshgrid (in phase space). + + Hence all arguments must always have + + 1. the same shape + 2. either ndim = 1 or ndim = 3. + + Parameters + ---------- + *args : array_like + Position-velocity arguments in the order energy, magnetic moment, canonical toroidal momentum. + + Returns + ------- + f : np.ndarray + The evaluated Maxwellian. + """ + + # Check that all args have the same shape + shape0 = np.shape(args[0]) + for i, arg in enumerate(args): + assert np.shape(arg) == shape0, f"Argument {i} has {np.shape(arg) = }, but must be {shape0 = }." + assert np.ndim(arg) == 1 or np.ndim(arg) == 3, ( + f"{np.ndim(arg) = } not allowed for canonical Maxwellian evaluation." + ) # flat or meshgrid evaluation + + # Get result evaluated with each particles' psic + res = self.n(args[2]) + vths = self.vth(args[2]) + + # take care of correct broadcasting, assuming args come from phase space meshgrid + if np.ndim(args[0]) == 3: + # move eta axes to the back + arg_t = np.moveaxis(args[0], 0, -1) + arg_t = np.moveaxis(arg_t, 0, -1) + arg_t = np.moveaxis(arg_t, 0, -1) + + # broadcast + res_broad = res + 0.0 * arg_t + + # move eta axes to the front + res = np.moveaxis(res_broad, -1, 0) + res = np.moveaxis(res, -1, 0) + res = np.moveaxis(res, -1, 0) + + # Multiply result with gaussian in energy + if np.ndim(args[0]) == 3: + vth_broad = vths + 0.0 * arg_t + vth = np.moveaxis(vth_broad, -1, 0) + vth = np.moveaxis(vth, -1, 0) + vth = np.moveaxis(vth, -1, 0) + else: + vth = vths + + res *= self.gaussian(args[0], vth=vth) + + return res diff --git a/src/struphy/kinetic_background/maxwellians.py b/src/struphy/kinetic_background/maxwellians.py index d9e34dcab..7965b4e04 100644 --- a/src/struphy/kinetic_background/maxwellians.py +++ b/src/struphy/kinetic_background/maxwellians.py @@ -1,13 +1,10 @@ "Maxwellian (Gaussian) distributions in velocity space." 
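A flat ("marker") evaluation sketch for the Maxwellian3D defined below; per Maxwellian.__call__, all six phase-space arguments must share one 1D shape:

from struphy.utils.arrays import xp as np
from struphy.kinetic_background.maxwellians import Maxwellian3D

f0 = Maxwellian3D()  # constant default moments n=1, u=0, vth=1
eta1 = eta2 = eta3 = np.linspace(0.01, 0.99, 10)
v1 = v2 = v3 = np.zeros(10)
print(f0(eta1, eta2, eta3, v1, v2, v3))  # ten copies of (2*pi)**(-3/2)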
-from typing import Callable - -import cunumpy as xp - -from struphy.fields_background.base import FluidEquilibriumWithB +from struphy.fields_background.base import FluidEquilibrium from struphy.fields_background.equils import set_defaults -from struphy.initial.base import Perturbation -from struphy.kinetic_background.base import Maxwellian +from struphy.kinetic_background import moment_functions +from struphy.kinetic_background.base import CanonicalMaxwellian, Maxwellian +from struphy.utils.arrays import xp as np class Maxwellian3D(Maxwellian): @@ -15,31 +12,40 @@ class Maxwellian3D(Maxwellian): Parameters ---------- - n, ui, vthi : tuple - Moments of the Maxwellian as tuples. The first entry defines the background - (float for constant background or callable), the second entry defines a Perturbation (can be None). + maxw_params : dict + Parameters for the kinetic background. + + pert_params : dict + Parameters for the kinetic perturbation added to the background. + + equil : FluidEquilibrium + One of :mod:`~struphy.fields_background.equils`. """ + @classmethod + def default_maxw_params(cls): + """Default parameters dictionary defining constant moments of the Maxwellian.""" + return { + "n": 1.0, + "u1": 0.0, + "u2": 0.0, + "u3": 0.0, + "vth1": 1.0, + "vth2": 1.0, + "vth3": 1.0, + } + def __init__( self, - n: tuple[float | Callable, Perturbation] = (1.0, None), - u1: tuple[float | Callable, Perturbation] = (0.0, None), - u2: tuple[float | Callable, Perturbation] = (0.0, None), - u3: tuple[float | Callable, Perturbation] = (0.0, None), - vth1: tuple[float | Callable, Perturbation] = (1.0, None), - vth2: tuple[float | Callable, Perturbation] = (1.0, None), - vth3: tuple[float | Callable, Perturbation] = (1.0, None), + maxw_params: dict = None, + pert_params: dict = None, + equil: FluidEquilibrium = None, ): - self._maxw_params = {} - self._maxw_params["n"] = n - self._maxw_params["u1"] = u1 - self._maxw_params["u2"] = u2 - self._maxw_params["u3"] = u3 - self._maxw_params["vth1"] = vth1 - self._maxw_params["vth2"] = vth2 - self._maxw_params["vth3"] = vth3 - - self.check_maxw_params() + super().__init__( + maxw_params=maxw_params, + pert_params=pert_params, + equil=equil, + ) # factors multiplied onto the defined moments n, u and vth (can be set via setter) self._moment_factors = { @@ -48,10 +54,6 @@ def __init__( "vth": [1.0, 1.0, 1.0], } - @property - def maxw_params(self): - return self._maxw_params - @property def coords(self): """Coordinates of the Maxwellian6D, :math:`(v_1, v_2, v_3)`.""" @@ -142,18 +144,14 @@ class GyroMaxwellian2D(Maxwellian): Parameters ---------- - n, u_para, u_perp, vth_para, vth_perp : tuple - Moments of the Maxwellian as tuples. The first entry defines the background - (float for constant background or callable), the second entry defines a Perturbation (can be None). - maxw_params : dict Parameters for the kinetic background. pert_params : dict Parameters for the kinetic perturbation added to the background. - equil : FluidEquilibriumWithB - Fluid background. + equil : FluidEquilibrium + One of :mod:`~struphy.fields_background.equils`. volume_form : bool Whether to represent the Maxwellian as a volume form; @@ -161,28 +159,32 @@ class GyroMaxwellian2D(Maxwellian): of the polar coordinate transofrmation (default = False). 
""" + @classmethod + def default_maxw_params(cls): + """Default parameters dictionary defining constant moments of the Maxwellian.""" + return { + "n": 1.0, + "u_para": 0.0, + "u_perp": 0.0, + "vth_para": 1.0, + "vth_perp": 1.0, + } + def __init__( self, - n: tuple[float | Callable, Perturbation] = (1.0, None), - u_para: tuple[float | Callable, Perturbation] = (0.0, None), - u_perp: tuple[float | Callable, Perturbation] = (0.0, None), - vth_para: tuple[float | Callable, Perturbation] = (1.0, None), - vth_perp: tuple[float | Callable, Perturbation] = (1.0, None), - equil: FluidEquilibriumWithB = None, + maxw_params: dict = None, + pert_params: dict = None, + equil: FluidEquilibrium = None, volume_form: bool = True, ): - self._maxw_params = {} - self._maxw_params["n"] = n - self._maxw_params["u_para"] = u_para - self._maxw_params["u_perp"] = u_perp - self._maxw_params["vth_para"] = vth_para - self._maxw_params["vth_perp"] = vth_perp - - self.check_maxw_params() + super().__init__( + maxw_params=maxw_params, + pert_params=pert_params, + equil=equil, + ) # volume form represenation self._volume_form = volume_form - self._equil = equil # factors multiplied onto the defined moments n, u and vth (can be set via setter) self._moment_factors = { @@ -191,10 +193,6 @@ def __init__( "vth": [1.0, 1.0], } - @property - def maxw_params(self): - return self._maxw_params - @property def coords(self): r"""Coordinates of the Maxwellian5D, :math:`(v_\parallel, v_\perp)`.""" @@ -251,7 +249,7 @@ def velocity_jacobian_det(self, eta1, eta2, eta3, *v): assert len(v) == 2 # call equilibrium - etas = (xp.vstack((eta1, eta2, eta3)).T).copy() + etas = (np.vstack((eta1, eta2, eta3)).T).copy() absB0 = self.equil.absB0(etas) # J = v_perp/B @@ -260,15 +258,10 @@ def velocity_jacobian_det(self, eta1, eta2, eta3, *v): return jacobian_det @property - def volume_form(self) -> bool: + def volume_form(self): """Boolean. True if the background is represented as a volume form (thus including the velocity Jacobian |v_perp|).""" return self._volume_form - @property - def equil(self) -> FluidEquilibriumWithB: - """Fluid background with B-field.""" - return self._equil - @property def moment_factors(self): """Collection of factors multiplied onto the defined moments n, u, and vth.""" @@ -301,35 +294,8 @@ def vth(self, eta1, eta2, eta3): return [ou * mom_fac for ou, mom_fac in zip(out, self.moment_factors["vth"])] -class CanonicalMaxwellian: - r"""canonical Maxwellian distribution function. - It is defined by three constants of motion in the axissymmetric toroidal system: - - - Shifted canonical toroidal momentum - - .. math:: - - \psi_c = \psi + \frac{m_s F}{q_s B}v_\parallel - \text{sign}(v_\parallel)\sqrt{2(\epsilon - \mu B)}\frac{m_sF}{q_sB} \mathcal{H}(\epsilon - \mu B), - - - Energy - - .. math:: - - \epsilon = \frac{1}{2}m_sv_\parallel² + \mu B, - - - Magnetic moment - - .. math:: - - \mu = \frac{m_s v_\perp²}{2B}, - - where :math:`\psi` is the poloidal magnetic flux function, :math:`F=F(\psi)` is the poloidal current function and :math:`\mathcal{H}` is the Heaviside function. - - With the three constants of motion, a canonical Maxwellian distribution function is defined as - - .. math:: - - F(\psi_c, \epsilon, \mu) = \frac{n(\psi_c)}{(2\pi)^{3/2}v_\text{th}³(\psi_c)} \text{exp}\left[ - \frac{\epsilon}{v_\text{th}²(\psi_c)}\right]. +class CanonicalMaxwellian(CanonicalMaxwellian): + r"""A :class:`~struphy.kinetic_background.base.CanonicalMaxwellian`. 
Parameters ---------- @@ -339,8 +305,8 @@ class CanonicalMaxwellian: pert_params : dict Parameters for the kinetic perturbation added to the background. - equil : FluidEquilibriumWithB - Fluid background. + equil : FluidEquilibrium + One of :mod:`~struphy.fields_background.equils`. volume_form : bool Whether to represent the Maxwellian as a volume form; @@ -348,22 +314,46 @@ class CanonicalMaxwellian: of the polar coordinate transofrmation (default = False). """ + @classmethod + def default_maxw_params(cls): + """Default parameters dictionary defining constant moments of the Maxwellian.""" + return { + "n": 1.0, + "vth": 1.0, + "type": "Particles5D", + } + def __init__( self, - n: tuple[float | Callable, Perturbation] = (1.0, None), - vth: tuple[float | Callable, Perturbation] = (1.0, None), - equil: FluidEquilibriumWithB = None, + maxw_params: dict = None, + pert_params: dict = None, + equil: FluidEquilibrium = None, volume_form: bool = True, ): - self._maxw_params = {} - self._maxw_params["n"] = n - self._maxw_params["vth"] = vth + # Set background parameters + self._maxw_params = self.default_maxw_params() + + if maxw_params is not None: + assert isinstance(maxw_params, dict) + self._maxw_params = set_defaults( + maxw_params, + self.default_maxw_params(), + ) + + # Set parameters for perturbation + self._pert_params = pert_params + + if self.pert_params is not None: + assert isinstance(pert_params, dict) + assert "type" in self.pert_params, '"type" is mandatory in perturbation dictionary.' + ptype = self.pert_params["type"] + assert ptype in self.pert_params, f"{ptype} is mandatory in perturbation dictionary." + self._pert_type = ptype - self.check_maxw_params() + self._equil = equil # volume form represenation self._volume_form = volume_form - self._equil = equil # factors multiplied onto the defined moments n and vth (can be set via setter) self._moment_factors = { @@ -382,21 +372,17 @@ def maxw_params(self): return self._maxw_params @property - def equil(self) -> FluidEquilibriumWithB: + def pert_params(self): + """Parameters dictionary defining the perturbations of the :meth:`~Maxwellian5D.maxw_params`.""" + return self._pert_params + + @property + def equil(self): """One of :mod:`~struphy.fields_background.equils` in case that moments are to be set in that way, None otherwise. """ return self._equil - def check_maxw_params(self): - for k, v in self.maxw_params.items(): - assert isinstance(k, str) - assert isinstance(v, tuple), f"Maxwallian parameter {k} must be tuple, but is {v}" - assert len(v) == 2 - - assert isinstance(v[0], (float, int, Callable)) - assert isinstance(v[1], Perturbation) or v[1] is None - def velocity_jacobian_det(self, eta1, eta2, eta3, energy): r"""TODO""" @@ -405,99 +391,14 @@ def velocity_jacobian_det(self, eta1, eta2, eta3, energy): assert eta3.ndim == 1 if self.maxw_params["type"] == "Particles6D": - return xp.sqrt(2.0 * energy) * 4.0 * xp.pi + return np.sqrt(2.0 * energy) * 4.0 * np.pi else: # call equilibrium - etas = (xp.vstack((eta1, eta2, eta3)).T).copy() + etas = (np.vstack((eta1, eta2, eta3)).T).copy() absB0 = self.equil.absB0(etas) - return xp.sqrt(energy) * 2.0 * xp.sqrt(2.0) / absB0 - - def gaussian(self, e, vth=1.0): - """3-dim. normal distribution, to which array-valued thermal velocities can be passed. - - Parameters - ---------- - e : float | array-like - Energy. - - vth : float | array-like - Thermal velocity evaluated at psic. - - Returns - ------- - An array of size(e). 
- """ - - if isinstance(vth, xp.ndarray): - assert e.shape == vth.shape, f"{e.shape =} but {vth.shape =}" - - return 2.0 * xp.sqrt(e / xp.pi) / vth**3 * xp.exp(-e / vth**2) - - def __call__(self, *args): - """Evaluates the canonical Maxwellian distribution function. - - There are two use-cases for this function in the code: - - 1. Evaluating for particles ("flat evaluation", inputs are all 1D of length N_p) - 2. Evaluating the function on a meshgrid (in phase space). - - Hence all arguments must always have - - 1. the same shape - 2. either ndim = 1 or ndim = 3. - - Parameters - ---------- - *args : array_like - Position-velocity arguments in the order energy, magnetic moment, canonical toroidal momentum. - - Returns - ------- - f : xp.ndarray - The evaluated Maxwellian. - """ - - # Check that all args have the same shape - shape0 = xp.shape(args[0]) - for i, arg in enumerate(args): - assert xp.shape(arg) == shape0, f"Argument {i} has {xp.shape(arg) =}, but must be {shape0 =}." - assert xp.ndim(arg) == 1 or xp.ndim(arg) == 3, ( - f"{xp.ndim(arg) =} not allowed for canonical Maxwellian evaluation." - ) # flat or meshgrid evaluation - - # Get result evaluated with each particles' psic - res = self.n(args[2]) - vths = self.vth(args[2]) - - # take care of correct broadcasting, assuming args come from phase space meshgrid - if xp.ndim(args[0]) == 3: - # move eta axes to the back - arg_t = xp.moveaxis(args[0], 0, -1) - arg_t = xp.moveaxis(arg_t, 0, -1) - arg_t = xp.moveaxis(arg_t, 0, -1) - - # broadcast - res_broad = res + 0.0 * arg_t - - # move eta axes to the front - res = xp.moveaxis(res_broad, -1, 0) - res = xp.moveaxis(res, -1, 0) - res = xp.moveaxis(res, -1, 0) - - # Multiply result with gaussian in energy - if xp.ndim(args[0]) == 3: - vth_broad = vths + 0.0 * arg_t - vth = xp.moveaxis(vth_broad, -1, 0) - vth = xp.moveaxis(vth, -1, 0) - vth = xp.moveaxis(vth, -1, 0) - else: - vth = vths - - res *= self.gaussian(args[0], vth=vth) - - return res + return np.sqrt(energy) * 2.0 * np.sqrt(2.0) / absB0 @property def volume_form(self): @@ -544,18 +445,18 @@ def rc(self, psic): rc_squared = (psic - self.equil.psi_range[0]) / (self.equil.psi_range[1] - self.equil.psi_range[0]) # sorting out indices of negative rc² - neg_index = xp.logical_not(rc_squared >= 0) + neg_index = np.logical_not(rc_squared >= 0) # make them positive rc_squared[neg_index] *= -1 # calculate rc - rc = xp.sqrt(rc_squared) + rc = np.sqrt(rc_squared) rc[neg_index] *= -1 return rc - def n(self, psic, add_perturbation: bool = None): + def n(self, psic): """Density as background + perturbation. Parameters @@ -567,34 +468,24 @@ def n(self, psic, add_perturbation: bool = None): ------- A float (background value) or a numpy.array of the evaluated density. """ + # collect arguments - assert isinstance(psic, xp.ndarray) + assert isinstance(psic, np.ndarray) # assuming that input comes from meshgrid. 
         if psic.ndim == 3:
             psic = psic[0, 0, :]
 
         # set background density
-        if isinstance(self.maxw_params["n"][0], (float, int)):
-            res = self.maxw_params["n"][0] + 0.0 * psic
-        else:
-            nfun = self.maxw_params["n"][1]
-            # for typ, params in mom_funcs.items():
-            #     nfun = getattr(moment_functions, typ)(**params)
+        if isinstance(self.maxw_params["n"], dict):
+            mom_funcs = self.maxw_params["n"]
+            for typ, params in mom_funcs.items():
+                nfun = getattr(moment_functions, typ)(**params)
             res = nfun(eta1=self.rc(psic))
+        else:
+            res = self.maxw_params["n"] + 0.0 * psic
 
-        # add perturbation
-        if add_perturbation is None:
-            add_perturbation = self.add_perturbation
-
-        perturbation = self.maxw_params["n"][1]
-        if perturbation is not None and add_perturbation:
-            assert isinstance(perturbation, Perturbation)
-            res = perturbation(eta1=self.rc(psic))
-        # if eta1.ndim == 1:
-        #     out += perturbation(eta1, eta2, eta3)
-        # else:
-        #     out += perturbation(*etas)
+        # TODO: add perturbation
 
         return res * self.moment_factors["n"]
 
@@ -612,29 +503,18 @@
         """
 
         # collect arguments
-        assert isinstance(psic, xp.ndarray)
+        assert isinstance(psic, np.ndarray)
 
         # assuming that input comes from meshgrid.
         if psic.ndim == 3:
             psic = psic[0, 0, :]
 
-        res = self.maxw_params["vth"][0] + 0.0 * psic
+        res = self.maxw_params["vth"] + 0.0 * psic
 
         # TODO: add perturbation
 
         return res * self.moment_factors["vth"]
 
-    @property
-    def add_perturbation(self) -> bool:
-        if not hasattr(self, "_add_perturbation"):
-            self._add_perturbation = True
-        return self._add_perturbation
-
-    @add_perturbation.setter
-    def add_perturbation(self, new):
-        assert isinstance(new, bool)
-        self._add_perturbation = new
-
 
 class ColdPlasma(Maxwellian):
     r"""Base class for a distribution as a Dirac-delta in velocity (vth = 0).
@@ -655,28 +535,20 @@ def default_maxw_params(cls):
 
     def __init__(
        self,
-        n: tuple[float | Callable, Perturbation] = (1.0, None),
-        u1: tuple[float | Callable, Perturbation] = (0.0, None),
-        u2: tuple[float | Callable, Perturbation] = (0.0, None),
-        u3: tuple[float | Callable, Perturbation] = (0.0, None),
-        equil: FluidEquilibriumWithB = None,
+        maxw_params: dict = None,
+        pert_params: dict = None,
+        equil: FluidEquilibrium = None,
    ):
-        self._maxw_params = {}
-        self._maxw_params["n"] = n
-        self._maxw_params["u1"] = u1
-        self._maxw_params["u2"] = u2
-        self._maxw_params["u3"] = u3
-        self._maxw_params["vth1"] = (0.0, None)
-        self._maxw_params["vth2"] = (0.0, None)
-        self._maxw_params["vth3"] = (0.0, None)
-
-        self.check_maxw_params()
+        super().__init__(
+            maxw_params=maxw_params,
+            pert_params=pert_params,
+            equil=equil,
+        )
 
-        self._equil = equil
-
-    @property
-    def maxw_params(self):
-        return self._maxw_params
+        # make sure temperatures are zero
+        self._maxw_params["vth1"] = 0.0
+        self._maxw_params["vth2"] = 0.0
+        self._maxw_params["vth3"] = 0.0
 
     @property
     def coords(self):
@@ -699,10 +571,5 @@ def volume_form(self):
         return False
 
-    @property
-    def equil(self) -> FluidEquilibriumWithB:
-        """Fluid background with B-field."""
-        return self._equil
-
     def velocity_jacobian_det(self, eta1, eta2, eta3, *v):
         """Jacobian determinant of the velocity coordinate transformation."""
         return 1.0
diff --git a/src/struphy/kinetic_background/moment_functions.py b/src/struphy/kinetic_background/moment_functions.py
index 39b22c6e1..8d17f670c 100644
--- a/src/struphy/kinetic_background/moment_functions.py
+++ b/src/struphy/kinetic_background/moment_functions.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 "Analytical moment functions."
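The new density branch above dispatches to ``moment_functions`` by name: each key of the ``maxw_params["n"]`` dict names a class in this module, and its value supplies the constructor arguments. A sketch of the pattern, with parameter values borrowed from the tests (it assumes ``ITPA_density`` accepts the ``n0`` and ``c`` keywords, as the tests suggest):

    from struphy.kinetic_background import moment_functions
    from struphy.utils.arrays import xp as np

    mom_funcs = {
        "ITPA_density": {
            "n0": 0.00720655,
            "c": [0.491230, 0.298228, 0.198739, 0.521298],
        }
    }

    # the key names the class, the value holds its constructor arguments
    for typ, params in mom_funcs.items():
        nfun = getattr(moment_functions, typ)(**params)

    eta1 = np.linspace(0.0, 1.0, 8)
    vals = nfun(eta1=eta1)  # n0 * c[3] * exp(-c[2]/c[1] * tanh((eta1 - c[0]) / c[2]))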
-import cunumpy as xp +from struphy.utils.arrays import xp as np class ITPA_density: @@ -46,8 +46,8 @@ def __call__(self, eta1, eta2=None, eta3=None): val = ( self._n0 * self._c[3] - * xp.exp( - -self._c[2] / self._c[1] * xp.tanh((eta1 - self._c[0]) / self._c[2]), + * np.exp( + -self._c[2] / self._c[1] * np.tanh((eta1 - self._c[0]) / self._c[2]), ) ) diff --git a/src/struphy/kinetic_background/tests/test_base.py b/src/struphy/kinetic_background/tests/test_base.py index 8a2e89d28..2556d27b1 100644 --- a/src/struphy/kinetic_background/tests/test_base.py +++ b/src/struphy/kinetic_background/tests/test_base.py @@ -1,32 +1,32 @@ def test_kinetic_background_magics(show_plot=False): """Test the magic commands __sum__, __mul__ and __sub__ of the Maxwellian base class.""" - import cunumpy as xp import matplotlib.pyplot as plt from struphy.kinetic_background.maxwellians import Maxwellian3D + from struphy.utils.arrays import xp as np Nel = [32, 1, 1] - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) - v1 = xp.linspace(-7.0, 7.0, 128) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) + v1 = np.linspace(-7.0, 7.0, 128) m1_params = {"n": 0.5, "u1": 3.0} m2_params = {"n": 0.5, "u1": -3.0} - m1 = Maxwellian3D(n=(0.5, None), u1=(3.0, None)) - m2 = Maxwellian3D(n=(0.5, None), u1=(-3.0, None)) + m1 = Maxwellian3D(maxw_params=m1_params) + m2 = Maxwellian3D(maxw_params=m2_params) m_add = m1 + m2 m_rmul_int = 2 * m1 m_mul_int = m1 * 2 m_mul_float = 2.0 * m1 - m_mul_npint = xp.ones(1, dtype=int)[0] * m1 + m_mul_npint = np.ones(1, dtype=int)[0] * m1 m_sub = m1 - m2 # compare distribution function - meshgrids = xp.meshgrid(e1, e2, e3, v1, [0.0], [0.0]) + meshgrids = np.meshgrid(e1, e2, e3, v1, [0.0], [0.0]) m1_vals = m1(*meshgrids) m2_vals = m2(*meshgrids) @@ -38,15 +38,15 @@ def test_kinetic_background_magics(show_plot=False): m_mul_npint_vals = m_mul_npint(*meshgrids) m_sub_vals = m_sub(*meshgrids) - assert xp.allclose(m1_vals + m2_vals, m_add_vals) - assert xp.allclose(2 * m1_vals, m_rmul_int_vals) - assert xp.allclose(2 * m1_vals, m_mul_int_vals) - assert xp.allclose(2.0 * m1_vals, m_mul_float_vals) - assert xp.allclose(xp.ones(1, dtype=int)[0] * m1_vals, m_mul_npint_vals) - assert xp.allclose(m1_vals - m2_vals, m_sub_vals) + assert np.allclose(m1_vals + m2_vals, m_add_vals) + assert np.allclose(2 * m1_vals, m_rmul_int_vals) + assert np.allclose(2 * m1_vals, m_mul_int_vals) + assert np.allclose(2.0 * m1_vals, m_mul_float_vals) + assert np.allclose(np.ones(1, dtype=int)[0] * m1_vals, m_mul_npint_vals) + assert np.allclose(m1_vals - m2_vals, m_sub_vals) # compare first two moments - meshgrids = xp.meshgrid(e1, e2, e3) + meshgrids = np.meshgrid(e1, e2, e3) n1_vals = m1.n(*meshgrids) n2_vals = m2.n(*meshgrids) @@ -57,11 +57,11 @@ def test_kinetic_background_magics(show_plot=False): u_add1, u_add2, u_add3 = m_add.u(*meshgrids) n_sub_vals = m_sub.n(*meshgrids) - assert xp.allclose(n1_vals + n2_vals, n_add_vals) - assert xp.allclose(u11 + u21, u_add1) - assert xp.allclose(u12 + u22, u_add2) - assert xp.allclose(u13 + u23, u_add3) - assert xp.allclose(n1_vals - n2_vals, n_sub_vals) + assert np.allclose(n1_vals + n2_vals, n_add_vals) + assert np.allclose(u11 + u21, u_add1) + assert np.allclose(u12 + u22, u_add2) + assert np.allclose(u13 + u23, u_add3) + assert np.allclose(n1_vals - n2_vals, n_sub_vals) if show_plot: plt.figure(figsize=(12, 8)) diff --git 
a/src/struphy/kinetic_background/tests/test_maxwellians.py b/src/struphy/kinetic_background/tests/test_maxwellians.py index 4aaa0624a..8378ffe17 100644 --- a/src/struphy/kinetic_background/tests/test_maxwellians.py +++ b/src/struphy/kinetic_background/tests/test_maxwellians.py @@ -8,31 +8,33 @@ def test_maxwellian_3d_uniform(Nel, show_plot=False): Asserts that the results over the domain and velocity space correspond to the analytical computation. """ - import cunumpy as xp import matplotlib.pyplot as plt from struphy.kinetic_background.maxwellians import Maxwellian3D + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) # ========================================================== # ==== Test uniform non-shifted, isothermal Maxwellian ===== # ========================================================== - maxwellian = Maxwellian3D(n=(2.0, None)) + maxw_params = {"n": 2.0} - meshgrids = xp.meshgrid(e1, e2, e3, [0.0], [0.0], [0.0]) + maxwellian = Maxwellian3D(maxw_params=maxw_params) + + meshgrids = np.meshgrid(e1, e2, e3, [0.0], [0.0], [0.0]) # Test constant value at v=0 res = maxwellian(*meshgrids).squeeze() - assert xp.allclose(res, 2.0 / (2 * xp.pi) ** (3 / 2) + 0 * e1, atol=10e-10), ( - f"{res=},\n {2.0 / (2 * xp.pi) ** (3 / 2)}" + assert np.allclose(res, 2.0 / (2 * np.pi) ** (3 / 2) + 0 * e1, atol=10e-10), ( + f"{res=},\n {2.0 / (2 * np.pi) ** (3 / 2)}" ) # test Maxwellian profile in v - v1 = xp.linspace(-5, 5, 128) - meshgrids = xp.meshgrid( + v1 = np.linspace(-5, 5, 128) + meshgrids = np.meshgrid( [0.0], [0.0], [0.0], @@ -41,8 +43,8 @@ def test_maxwellian_3d_uniform(Nel, show_plot=False): [0.0], ) res = maxwellian(*meshgrids).squeeze() - res_ana = 2.0 * xp.exp(-(v1**2) / 2.0) / (2 * xp.pi) ** (3 / 2) - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" + res_ana = 2.0 * np.exp(-(v1**2) / 2.0) / (2 * np.pi) ** (3 / 2) + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" # ======================================================= # ===== Test non-zero shifts and thermal velocities ===== @@ -54,28 +56,21 @@ def test_maxwellian_3d_uniform(Nel, show_plot=False): vth1 = 1.2 vth2 = 0.5 vth3 = 0.3 + maxw_params = {"n": n, "u1": u1, "u2": u2, "u3": u3, "vth1": vth1, "vth2": vth2, "vth3": vth3} - maxwellian = Maxwellian3D( - n=(2.0, None), - u1=(1.0, None), - u2=(-0.2, None), - u3=(0.1, None), - vth1=(1.2, None), - vth2=(0.5, None), - vth3=(0.3, None), - ) + maxwellian = Maxwellian3D(maxw_params=maxw_params) # test Maxwellian profile in v for i in range(3): vs = [0, 0, 0] - vs[i] = xp.linspace(-5, 5, 128) - meshgrids = xp.meshgrid([0.0], [0.0], [0.0], *vs) + vs[i] = np.linspace(-5, 5, 128) + meshgrids = np.meshgrid([0.0], [0.0], [0.0], *vs) res = maxwellian(*meshgrids).squeeze() - res_ana = xp.exp(-((vs[0] - u1) ** 2) / (2 * vth1**2)) - res_ana *= xp.exp(-((vs[1] - u2) ** 2) / (2 * vth2**2)) - res_ana *= xp.exp(-((vs[2] - u3) ** 2) / (2 * vth3**2)) - res_ana *= n / ((2 * xp.pi) ** (3 / 2) * vth1 * vth2 * vth3) + res_ana = np.exp(-((vs[0] - u1) ** 2) / (2 * vth1**2)) + res_ana *= np.exp(-((vs[1] - u2) ** 2) / (2 * vth2**2)) + res_ana *= np.exp(-((vs[2] - u3) ** 2) / (2 * vth3**2)) + res_ana *= n / ((2 * np.pi) ** (3 / 2) * vth1 * vth2 * vth3) if show_plot: plt.plot(vs[i], res_ana, label="analytical") @@ -86,21 +81,20 @@ def 
test_maxwellian_3d_uniform(Nel, show_plot=False): plt.xlabel("v_" + str(i + 1)) plt.show() - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana =}" + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana =}" @pytest.mark.parametrize("Nel", [[64, 1, 1]]) def test_maxwellian_3d_perturbed(Nel, show_plot=False): """Tests the Maxwellian3D class for perturbations.""" - import cunumpy as xp import matplotlib.pyplot as plt - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import Maxwellian3D + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - v1 = xp.linspace(-5.0, 5.0, 128) + e1 = np.linspace(0.0, 1.0, Nel[0]) + v1 = np.linspace(-5.0, 5.0, 128) # =============================================== # ===== Test cosine perturbation in density ===== @@ -108,14 +102,23 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): amp = 0.1 mode = 1 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": 2.0} + pert_params = { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [mode], + "amps": [amp], + } + } + } - maxwellian = Maxwellian3D(n=(2.0, pert)) + maxwellian = Maxwellian3D(maxw_params=maxw_params, pert_params=pert_params) - meshgrids = xp.meshgrid(e1, [0.0], [0.0], [0.0], [0.0], [0.0]) + meshgrids = np.meshgrid(e1, [0.0], [0.0], [0.0], [0.0], [0.0]) res = maxwellian(*meshgrids).squeeze() - ana_res = (2.0 + amp * xp.cos(2 * xp.pi * mode * e1)) / (2 * xp.pi) ** (3 / 2) + ana_res = (2.0 + amp * np.cos(2 * np.pi * mode * e1)) / (2 * np.pi) ** (3 / 2) if show_plot: plt.plot(e1, ana_res, label="analytical") @@ -126,7 +129,7 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): plt.ylabel("f(eta_1)") plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ============================================= # ===== Test cosine perturbation in shift ===== @@ -136,11 +139,20 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): n = 2.0 u1 = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "u1": u1} + pert_params = { + "u1": { + "ModesCos": { + "given_in_basis": "0", + "ls": [mode], + "amps": [amp], + } + } + } - maxwellian = Maxwellian3D(n=(n, None), u1=(u1, pert)) + maxwellian = Maxwellian3D(maxw_params=maxw_params, pert_params=pert_params) - meshgrids = xp.meshgrid( + meshgrids = np.meshgrid( e1, [0.0], [0.0], @@ -150,9 +162,9 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): ) res = maxwellian(*meshgrids).squeeze() - shift = u1 + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-((v1 - shift[:, None]) ** 2) / 2) - ana_res *= n / (2 * xp.pi) ** (3 / 2) + shift = u1 + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-((v1 - shift[:, None]) ** 2) / 2) + ana_res *= n / (2 * np.pi) ** (3 / 2) if show_plot: plt.figure(1) @@ -173,7 +185,7 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # =========================================== # ===== Test cosine perturbation in vth ===== @@ -183,11 +195,20 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): n = 2.0 vth1 = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "vth1": vth1} + pert_params = { + "vth1": { + "ModesCos": { + "given_in_basis": "0", + "ls": [mode], + "amps": 
[amp], + } + } + } - maxwellian = Maxwellian3D(n=(n, None), vth1=(vth1, pert)) + maxwellian = Maxwellian3D(maxw_params=maxw_params, pert_params=pert_params) - meshgrids = xp.meshgrid( + meshgrids = np.meshgrid( e1, [0.0], [0.0], @@ -197,9 +218,9 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): ) res = maxwellian(*meshgrids).squeeze() - thermal = vth1 + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-(v1**2) / (2.0 * thermal[:, None] ** 2)) - ana_res *= n / ((2 * xp.pi) ** (3 / 2) * thermal[:, None]) + thermal = vth1 + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-(v1**2) / (2.0 * thermal[:, None] ** 2)) + ana_res *= n / ((2 * np.pi) ** (3 / 2) * thermal[:, None]) if show_plot: plt.figure(1) @@ -220,22 +241,30 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ============================================= # ===== Test ITPA perturbation in density ===== # ============================================= n0 = 0.00720655 - c = (0.491230, 0.298228, 0.198739, 0.521298) + c = [0.491230, 0.298228, 0.198739, 0.521298] - pert = perturbations.ITPA_density(n0=n0, c=c) + maxw_params = { + "n": { + "ITPA_density": { + "given_in_basis": "0", + "n0": n0, + "c": c, + } + } + } - maxwellian = Maxwellian3D(n=(0.0, pert)) + maxwellian = Maxwellian3D(maxw_params=maxw_params) - meshgrids = xp.meshgrid(e1, [0.0], [0.0], [0.0], [0.0], [0.0]) + meshgrids = np.meshgrid(e1, [0.0], [0.0], [0.0], [0.0], [0.0]) res = maxwellian(*meshgrids).squeeze() - ana_res = n0 * c[3] * xp.exp(-c[2] / c[1] * xp.tanh((e1 - c[0]) / c[2])) / (2 * xp.pi) ** (3 / 2) + ana_res = n0 * c[3] * np.exp(-c[2] / c[1] * np.tanh((e1 - c[0]) / c[2])) / (2 * np.pi) ** (3 / 2) if show_plot: plt.plot(e1, ana_res, label="analytical") @@ -246,7 +275,7 @@ def test_maxwellian_3d_perturbed(Nel, show_plot=False): plt.ylabel("f(eta_1)") plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" @pytest.mark.parametrize("Nel", [[8, 11, 12]]) @@ -255,85 +284,92 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): import inspect - import cunumpy as xp import matplotlib.pyplot as plt from struphy.fields_background import equils - from struphy.fields_background.base import FluidEquilibrium from struphy.geometry import domains from struphy.initial import perturbations - from struphy.initial.base import Perturbation from struphy.kinetic_background.maxwellians import Maxwellian3D + from struphy.utils.arrays import xp as np + + maxw_params_mhd = { + "n": "fluid_background", + "u1": "fluid_background", + "u2": "fluid_background", + "u3": "fluid_background", + "vth1": "fluid_background", + "vth2": "fluid_background", + "vth3": "fluid_background", + } + + maxw_params_1 = { + "n": 1.0, + "u1": "fluid_background", + "u2": "fluid_background", + "u3": "fluid_background", + "vth1": "fluid_background", + "vth2": "fluid_background", + "vth3": "fluid_background", + } - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) v1 = [0.0] v2 = [0.0, -1.0] v3 = [0.0, -1.0, -1.3] - meshgrids = xp.meshgrid(e1, e2, e3, v1, v2, v3, indexing="ij") - e_meshgrids = xp.meshgrid(e1, e2, e3, indexing="ij") + meshgrids = np.meshgrid(e1, e2, 
e3, v1, v2, v3, indexing="ij") + e_meshgrids = np.meshgrid(e1, e2, e3, indexing="ij") n_mks = 17 - e1_fl = xp.random.rand(n_mks) - e2_fl = xp.random.rand(n_mks) - e3_fl = xp.random.rand(n_mks) - v1_fl = xp.random.randn(n_mks) - v2_fl = xp.random.randn(n_mks) - v3_fl = xp.random.randn(n_mks) + e1_fl = np.random.rand(n_mks) + e2_fl = np.random.rand(n_mks) + e3_fl = np.random.rand(n_mks) + v1_fl = np.random.randn(n_mks) + v2_fl = np.random.randn(n_mks) + v3_fl = np.random.randn(n_mks) args_fl = [e1_fl, e2_fl, e3_fl, v1_fl, v2_fl, v3_fl] - e_args_fl = xp.concatenate((e1_fl[:, None], e2_fl[:, None], e3_fl[:, None]), axis=1) + e_args_fl = np.concatenate((e1_fl[:, None], e2_fl[:, None], e3_fl[:, None]), axis=1) for key, val in inspect.getmembers(equils): if inspect.isclass(val) and val.__module__ == equils.__name__: - print(f"{key =}") + print(f"{key = }") if "DESCequilibrium" in key and not with_desc: - print(f"Attention: {with_desc =}, DESC not tested here !!") + print(f"Attention: {with_desc = }, DESC not tested here !!") continue if "GVECequilibrium" in key: - print("Attention: flat (marker) evaluation not tested for GVEC at the moment.") + print(f"Attention: flat (marker) evaluation not tested for GVEC at the moment.") mhd_equil = val() - assert isinstance(mhd_equil, FluidEquilibrium) - print(f"{mhd_equil.params =}") + print(f"{mhd_equil.params = }") if "AdhocTorus" in key: mhd_equil.domain = domains.HollowTorus( - a1=1e-3, - a2=mhd_equil.params["a"], - R0=mhd_equil.params["R0"], - tor_period=1, + a1=1e-3, a2=mhd_equil.params["a"], R0=mhd_equil.params["R0"], tor_period=1 ) elif "EQDSKequilibrium" in key: mhd_equil.domain = domains.Tokamak(equilibrium=mhd_equil) elif "CircularTokamak" in key: mhd_equil.domain = domains.HollowTorus( - a1=1e-3, - a2=mhd_equil.params["a"], - R0=mhd_equil.params["R0"], - tor_period=1, + a1=1e-3, a2=mhd_equil.params["a"], R0=mhd_equil.params["R0"], tor_period=1 ) elif "HomogenSlab" in key: mhd_equil.domain = domains.Cuboid() elif "ShearedSlab" in key: mhd_equil.domain = domains.Cuboid( r1=mhd_equil.params["a"], - r2=mhd_equil.params["a"] * 2 * xp.pi, - r3=mhd_equil.params["R0"] * 2 * xp.pi, + r2=mhd_equil.params["a"] * 2 * np.pi, + r3=mhd_equil.params["R0"] * 2 * np.pi, ) elif "ShearFluid" in key: mhd_equil.domain = domains.Cuboid( - r1=mhd_equil.params["a"], - r2=mhd_equil.params["b"], - r3=mhd_equil.params["c"], + r1=mhd_equil.params["a"], r2=mhd_equil.params["b"], r3=mhd_equil.params["c"] ) elif "ScrewPinch" in key: mhd_equil.domain = domains.HollowCylinder( - a1=1e-3, - a2=mhd_equil.params["a"], - Lz=mhd_equil.params["R0"] * 2 * xp.pi, + a1=1e-3, a2=mhd_equil.params["a"], Lz=mhd_equil.params["R0"] * 2 * np.pi ) else: try: @@ -341,81 +377,57 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): except: print(f"Not setting domain for {key}.") - maxwellian = Maxwellian3D( - n=(mhd_equil.n0, None), - u1=(mhd_equil.u_cart_1, None), - u2=(mhd_equil.u_cart_2, None), - u3=(mhd_equil.u_cart_3, None), - vth1=(mhd_equil.vth0, None), - vth2=(mhd_equil.vth0, None), - vth3=(mhd_equil.vth0, None), - ) + maxwellian = Maxwellian3D(maxw_params=maxw_params_mhd, equil=mhd_equil) - maxwellian_1 = Maxwellian3D( - n=(1.0, None), - u1=(mhd_equil.u_cart_1, None), - u2=(mhd_equil.u_cart_2, None), - u3=(mhd_equil.u_cart_3, None), - vth1=(mhd_equil.vth0, None), - vth2=(mhd_equil.vth0, None), - vth3=(mhd_equil.vth0, None), - ) + maxwellian_1 = Maxwellian3D(maxw_params=maxw_params_1, equil=mhd_equil) # test meshgrid evaluation n0 = mhd_equil.n0(*e_meshgrids) - assert xp.allclose( 
- maxwellian(*meshgrids)[:, :, :, 0, 0, 0], - n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 0, 0], + assert np.allclose( + maxwellian(*meshgrids)[:, :, :, 0, 0, 0], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 0, 0] ) - assert xp.allclose( - maxwellian(*meshgrids)[:, :, :, 0, 1, 2], - n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 1, 2], + assert np.allclose( + maxwellian(*meshgrids)[:, :, :, 0, 1, 2], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 1, 2] ) # test flat evaluation if "GVECequilibrium" in key: pass else: - assert xp.allclose(maxwellian(*args_fl), mhd_equil.n0(e_args_fl) * maxwellian_1(*args_fl)) - assert xp.allclose(maxwellian.n(e1_fl, e2_fl, e3_fl), mhd_equil.n0(e_args_fl)) + assert np.allclose(maxwellian(*args_fl), mhd_equil.n0(e_args_fl) * maxwellian_1(*args_fl)) + assert np.allclose(maxwellian.n(e1_fl, e2_fl, e3_fl), mhd_equil.n0(e_args_fl)) u_maxw = maxwellian.u(e1_fl, e2_fl, e3_fl) u_eq = mhd_equil.u_cart(e_args_fl)[0] - assert all([xp.allclose(m, e) for m, e in zip(u_maxw, u_eq)]) + assert all([np.allclose(m, e) for m, e in zip(u_maxw, u_eq)]) vth_maxw = maxwellian.vth(e1_fl, e2_fl, e3_fl) - vth_eq = xp.sqrt(mhd_equil.p0(e_args_fl) / mhd_equil.n0(e_args_fl)) - assert all([xp.allclose(v, vth_eq) for v in vth_maxw]) + vth_eq = np.sqrt(mhd_equil.p0(e_args_fl) / mhd_equil.n0(e_args_fl)) + assert all([np.allclose(v, vth_eq) for v in vth_maxw]) # plotting moments if show_plot: - plt.figure(f"{mhd_equil =}", figsize=(24, 16)) + plt.figure(f"{mhd_equil = }", figsize=(24, 16)) x, y, z = mhd_equil.domain(*e_meshgrids) # density plots n_cart = mhd_equil.domain.push(maxwellian.n, *e_meshgrids) - levels = xp.linspace(xp.min(n_cart) - 1e-10, xp.max(n_cart), 20) + levels = np.linspace(np.min(n_cart) - 1e-10, np.max(n_cart), 20) plt.subplot(2, 5, 1) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2 - 1, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2 - 1, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2 - 1, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2 - 1, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") @@ -438,7 +450,7 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): # velocity plots us = maxwellian.u(*e_meshgrids) for i, u in enumerate(us): - levels = xp.linspace(xp.min(u) - 1e-10, xp.max(u), 20) + levels = np.linspace(np.min(u) - 1e-10, np.max(u), 20) plt.subplot(2, 5, 2 + i) if "Slab" in key or "Pinch" in key: @@ -471,32 +483,26 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): vth = maxwellian.vth(*e_meshgrids)[0] vth_cart = mhd_equil.domain.push(vth, *e_meshgrids) - levels = xp.linspace(xp.min(vth_cart) - 1e-10, xp.max(vth_cart), 20) + levels = np.linspace(np.min(vth_cart) - 1e-10, np.max(vth_cart), 20) plt.subplot(2, 5, 5) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], vth_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2 - 1, :], - vth_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2 - 1, :], vth_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], vth_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - 
y[:, Nel[1] // 2 - 1, :], - vth_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2 - 1, :], vth_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian thermal velocity $v_t$, top view (e1-e3)") + plt.title(f"Maxwellian thermal velocity $v_t$, top view (e1-e3)") plt.subplot(2, 5, 10) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, :, 0], y[:, :, 0], vth_cart[:, :, 0], levels=levels) @@ -508,7 +514,7 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("z") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian thermal velocity $v_t$, poloidal view (e1-e2)") + plt.title(f"Maxwellian thermal velocity $v_t$, poloidal view (e1-e2)") plt.show() @@ -517,22 +523,23 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): maxw_params_zero = {"n": 0.0, "vth1": 0.0, "vth2": 0.0, "vth3": 0.0} for key_2, val_2 in inspect.getmembers(perturbations): - if inspect.isclass(val_2) and val_2.__module__ == perturbations.__name__: + if inspect.isclass(val_2): + print(f"{key_2 = }") pert = val_2() - assert isinstance(pert, Perturbation) - print(f"{pert =}") - if isinstance(pert, perturbations.Noise): - continue + print(f"{pert = }") + pert_params = { + "n": {key_2: {"given_in_basis": "0"}}, + "u1": {key_2: {"given_in_basis": "0"}}, + "u2": {key_2: {"given_in_basis": "0"}}, + "u3": {key_2: {"given_in_basis": "0"}}, + "vth1": {key_2: {"given_in_basis": "0"}}, + "vth2": {key_2: {"given_in_basis": "0"}}, + "vth3": {key_2: {"given_in_basis": "0"}}, + } # background + perturbation maxwellian_perturbed = Maxwellian3D( - n=(mhd_equil.n0, pert), - u1=(mhd_equil.u_cart_1, pert), - u2=(mhd_equil.u_cart_2, pert), - u3=(mhd_equil.u_cart_3, pert), - vth1=(mhd_equil.vth0, pert), - vth2=(mhd_equil.vth0, pert), - vth3=(mhd_equil.vth0, pert), + maxw_params=maxw_params_mhd, pert_params=pert_params, equil=mhd_equil ) # test meshgrid evaluation @@ -543,22 +550,16 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): # pure perturbation maxwellian_zero_bckgr = Maxwellian3D( - n=(0.0, pert), - u1=(0.0, pert), - u2=(0.0, pert), - u3=(0.0, pert), - vth1=(0.0, pert), - vth2=(0.0, pert), - vth3=(0.0, pert), + maxw_params=maxw_params_zero, pert_params=pert_params, equil=mhd_equil ) - assert xp.allclose(maxwellian_zero_bckgr.n(*e_meshgrids), pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[0], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[1], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[2], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[0], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[1], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[2], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.n(*e_meshgrids), pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[0], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[1], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[2], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[0], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[1], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[2], pert(*e_meshgrids)) # plotting perturbations if show_plot: # and 'Torus' in key_2: 
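The perturbation dictionaries exercised in these tests all follow one nesting convention: moment name, then perturbation class name (from struphy.initial.perturbations), then its parameters, with "given_in_basis" declaring the basis the perturbation is given in. A minimal sketch along the lines of the cosine tests above, assuming the flat ``n(e1, e2, e3)`` evaluation used there:

    from struphy.kinetic_background.maxwellians import Maxwellian3D
    from struphy.utils.arrays import xp as np

    amp, mode = 0.1, 1
    pert_params = {
        "n": {
            "ModesCos": {
                "given_in_basis": "0",
                "ls": [mode],
                "amps": [amp],
            }
        }
    }

    maxwellian = Maxwellian3D(maxw_params={"n": 2.0}, pert_params=pert_params)

    # the density along eta1 should follow 2.0 + amp * cos(2*pi*mode*eta1)
    e1 = np.linspace(0.0, 1.0, 16)
    n_vals = maxwellian.n(e1, 0.0 * e1, 0.0 * e1)
    assert np.allclose(n_vals, 2.0 + amp * np.cos(2 * np.pi * mode * e1))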
@@ -568,26 +569,20 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): # density plots n_cart = mhd_equil.domain.push(maxwellian_zero_bckgr.n, *e_meshgrids) - levels = xp.linspace(xp.min(n_cart) - 1e-10, xp.max(n_cart), 20) + levels = np.linspace(np.min(n_cart) - 1e-10, np.max(n_cart), 20) plt.subplot(2, 5, 1) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") @@ -610,26 +605,20 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): # velocity plots us = maxwellian_zero_bckgr.u(*e_meshgrids) for i, u in enumerate(us): - levels = xp.linspace(xp.min(u) - 1e-10, xp.max(u), 20) + levels = np.linspace(np.min(u) - 1e-10, np.max(u), 20) plt.subplot(2, 5, 2 + i) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], u[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2, :], - u[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2, :], u[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], u[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2, :], - u[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2, :], u[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") @@ -653,7 +642,7 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): vth = maxwellian_zero_bckgr.vth(*e_meshgrids)[0] vth_cart = mhd_equil.domain.push(vth, *e_meshgrids) - levels = xp.linspace(xp.min(vth_cart) - 1e-10, xp.max(vth_cart), 20) + levels = np.linspace(np.min(vth_cart) - 1e-10, np.max(vth_cart), 20) plt.subplot(2, 5, 5) if "Slab" in key or "Pinch" in key: @@ -678,7 +667,7 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("y") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian perturbed thermal velocity $v_t$, top view (e1-e3)") + plt.title(f"Maxwellian perturbed thermal velocity $v_t$, top view (e1-e3)") plt.subplot(2, 5, 10) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, :, 0], y[:, :, 0], vth_cart[:, :, 0], levels=levels) @@ -690,7 +679,7 @@ def test_maxwellian_3d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("z") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian perturbed thermal velocity $v_t$, poloidal view (e1-e2)") + plt.title(f"Maxwellian perturbed thermal velocity $v_t$, poloidal view (e1-e2)") plt.show() @@ -702,34 +691,36 @@ def test_maxwellian_2d_uniform(Nel, show_plot=False): Asserts that the results over the domain and velocity space correspond to the analytical computation. 
""" - import cunumpy as xp import matplotlib.pyplot as plt from struphy.kinetic_background.maxwellians import GyroMaxwellian2D + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) # =========================================================== # ===== Test uniform non-shifted, isothermal Maxwellian ===== # =========================================================== - maxwellian = GyroMaxwellian2D(n=(2.0, None), volume_form=False) + maxw_params = {"n": 2.0} - meshgrids = xp.meshgrid(e1, e2, e3, [0.01], [0.01]) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, volume_form=False) + + meshgrids = np.meshgrid(e1, e2, e3, [0.01], [0.01]) # Test constant value at v_para = v_perp = 0.01 res = maxwellian(*meshgrids).squeeze() - assert xp.allclose(res, 2.0 / (2 * xp.pi) ** (1 / 2) * xp.exp(-(0.01**2)) + 0 * e1, atol=10e-10), ( - f"{res=},\n {2.0 / (2 * xp.pi) ** (3 / 2)}" + assert np.allclose(res, 2.0 / (2 * np.pi) ** (1 / 2) * np.exp(-(0.01**2)) + 0 * e1, atol=10e-10), ( + f"{res=},\n {2.0 / (2 * np.pi) ** (3 / 2)}" ) # test Maxwellian profile in v - v_para = xp.linspace(-5, 5, 64) - v_perp = xp.linspace(0, 2.5, 64) - vpara, vperp = xp.meshgrid(v_para, v_perp) + v_para = np.linspace(-5, 5, 64) + v_perp = np.linspace(0, 2.5, 64) + vpara, vperp = np.meshgrid(v_para, v_perp) - meshgrids = xp.meshgrid( + meshgrids = np.meshgrid( [0.0], [0.0], [0.0], @@ -738,8 +729,8 @@ def test_maxwellian_2d_uniform(Nel, show_plot=False): ) res = maxwellian(*meshgrids).squeeze() - res_ana = 2.0 / (2 * xp.pi) ** (1 / 2) * xp.exp(-(vpara.T**2) / 2.0 - vperp.T**2 / 2.0) - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" + res_ana = 2.0 / (2 * np.pi) ** (1 / 2) * np.exp(-(vpara.T**2) / 2.0 - vperp.T**2 / 2.0) + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" # ======================================================= # ===== Test non-zero shifts and thermal velocities ===== @@ -749,27 +740,21 @@ def test_maxwellian_2d_uniform(Nel, show_plot=False): u_perp = 0.2 vth_para = 1.2 vth_perp = 0.5 + maxw_params = {"n": n, "u_para": u_para, "u_perp": u_perp, "vth_para": vth_para, "vth_perp": vth_perp} - maxwellian = GyroMaxwellian2D( - n=(n, None), - u_para=(u_para, None), - u_perp=(u_perp, None), - vth_para=(vth_para, None), - vth_perp=(vth_perp, None), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, volume_form=False) # test Maxwellian profile in v - v_para = xp.linspace(-5, 5, 64) - v_perp = xp.linspace(0, 2.5, 64) - vpara, vperp = xp.meshgrid(v_para, v_perp) + v_para = np.linspace(-5, 5, 64) + v_perp = np.linspace(0, 2.5, 64) + vpara, vperp = np.meshgrid(v_para, v_perp) - meshgrids = xp.meshgrid([0.0], [0.0], [0.0], v_para, v_perp) + meshgrids = np.meshgrid([0.0], [0.0], [0.0], v_para, v_perp) res = maxwellian(*meshgrids).squeeze() - res_ana = xp.exp(-((vpara.T - u_para) ** 2) / (2 * vth_para**2)) - res_ana *= xp.exp(-((vperp.T - u_perp) ** 2) / (2 * vth_perp**2)) - res_ana *= n / ((2 * xp.pi) ** (1 / 2) * vth_para * vth_perp**2) + res_ana = np.exp(-((vpara.T - u_para) ** 2) / (2 * vth_para**2)) + res_ana *= np.exp(-((vperp.T - u_perp) ** 2) / (2 * vth_perp**2)) + res_ana *= n / ((2 * np.pi) ** (1 / 2) * vth_para * vth_perp**2) if show_plot: plt.plot(v_para, res_ana[:, 32], label="analytical") @@ -788,38 +773,38 @@ def 
test_maxwellian_2d_uniform(Nel, show_plot=False): plt.xlabel("v_" + "perp") plt.show() - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana =}" + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana =}" @pytest.mark.parametrize("Nel", [[6, 1, 1]]) def test_maxwellian_2d_perturbed(Nel, show_plot=False): """Tests the GyroMaxwellian2D class for perturbations.""" - import cunumpy as xp import matplotlib.pyplot as plt - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import GyroMaxwellian2D + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - v1 = xp.linspace(-5.0, 5.0, 128) - v2 = xp.linspace(0, 2.5, 128) + e1 = np.linspace(0.0, 1.0, Nel[0]) + v1 = np.linspace(-5.0, 5.0, 128) + v2 = np.linspace(0, 2.5, 128) # =============================================== # ===== Test cosine perturbation in density ===== # =============================================== amp = 0.1 mode = 1 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": 2.0} + pert_params = {"n": {"ModesCos": {"given_in_basis": "0", "ls": [mode], "amps": [amp]}}} - maxwellian = GyroMaxwellian2D(n=(2.0, pert), volume_form=False) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, pert_params=pert_params, volume_form=False) v_perp = 0.1 - meshgrids = xp.meshgrid(e1, [0.0], [0.0], [0.0], v_perp) + meshgrids = np.meshgrid(e1, [0.0], [0.0], [0.0], v_perp) res = maxwellian(*meshgrids).squeeze() - ana_res = (2.0 + amp * xp.cos(2 * xp.pi * mode * e1)) / (2 * xp.pi) ** (1 / 2) - ana_res *= xp.exp(-(v_perp**2) / 2) + ana_res = (2.0 + amp * np.cos(2 * np.pi * mode * e1)) / (2 * np.pi) ** (1 / 2) + ana_res *= np.exp(-(v_perp**2) / 2) if show_plot: plt.plot(e1, ana_res, label="analytical") @@ -830,7 +815,7 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.ylabel("f(eta_1)") plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ==================================================== # ===== Test cosine perturbation in shift (para) ===== @@ -839,21 +824,18 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): mode = 1 n = 2.0 u_para = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "u_para": u_para} + pert_params = {"u_para": {"ModesCos": {"given_in_basis": "0", "ls": [mode], "amps": [amp]}}} - maxwellian = GyroMaxwellian2D( - n=(2.0, None), - u_para=(u_para, pert), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, pert_params=pert_params, volume_form=False) v_perp = 0.1 - meshgrids = xp.meshgrid(e1, [0.0], [0.0], v1, v_perp) + meshgrids = np.meshgrid(e1, [0.0], [0.0], v1, v_perp) res = maxwellian(*meshgrids).squeeze() - shift = u_para + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-((v1 - shift[:, None]) ** 2) / 2.0) - ana_res *= n / (2 * xp.pi) ** (1 / 2) * xp.exp(-(v_perp**2) / 2.0) + shift = u_para + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-((v1 - shift[:, None]) ** 2) / 2.0) + ana_res *= n / (2 * np.pi) ** (1 / 2) * np.exp(-(v_perp**2) / 2.0) if show_plot: plt.figure(1) @@ -874,7 +856,7 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ==================================================== # ===== Test cosine perturbation in shift (perp) ===== @@ 
-883,20 +865,17 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): mode = 1 n = 2.0 u_perp = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "u_perp": u_perp} + pert_params = {"u_perp": {"ModesCos": {"given_in_basis": "0", "ls": [mode], "amps": [amp]}}} - maxwellian = GyroMaxwellian2D( - n=(2.0, None), - u_perp=(u_perp, pert), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, pert_params=pert_params, volume_form=False) - meshgrids = xp.meshgrid(e1, [0.0], [0.0], 0.0, v2) + meshgrids = np.meshgrid(e1, [0.0], [0.0], 0.0, v2) res = maxwellian(*meshgrids).squeeze() - shift = u_perp + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-((v2 - shift[:, None]) ** 2) / 2.0) - ana_res *= n / (2 * xp.pi) ** (1 / 2) + shift = u_perp + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-((v2 - shift[:, None]) ** 2) / 2.0) + ana_res *= n / (2 * np.pi) ** (1 / 2) if show_plot: plt.figure(1) @@ -917,7 +896,7 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ================================================== # ===== Test cosine perturbation in vth (para) ===== @@ -926,16 +905,13 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): mode = 1 n = 2.0 vth_para = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "vth_para": vth_para} + pert_params = {"vth_para": {"ModesCos": {"given_in_basis": "0", "ls": [mode], "amps": [amp]}}} - maxwellian = GyroMaxwellian2D( - n=(2.0, None), - vth_para=(vth_para, pert), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, pert_params=pert_params, volume_form=False) v_perp = 0.1 - meshgrids = xp.meshgrid( + meshgrids = np.meshgrid( e1, [0.0], [0.0], @@ -944,10 +920,10 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): ) res = maxwellian(*meshgrids).squeeze() - thermal = vth_para + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-(v1**2) / (2.0 * thermal[:, None] ** 2)) - ana_res *= n / ((2 * xp.pi) ** (1 / 2) * thermal[:, None]) - ana_res *= xp.exp(-(v_perp**2) / 2.0) + thermal = vth_para + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-(v1**2) / (2.0 * thermal[:, None] ** 2)) + ana_res *= n / ((2 * np.pi) ** (1 / 2) * thermal[:, None]) + ana_res *= np.exp(-(v_perp**2) / 2.0) if show_plot: plt.figure(1) @@ -968,7 +944,7 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ================================================== # ===== Test cosine perturbation in vth (perp) ===== @@ -977,15 +953,12 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): mode = 1 n = 2.0 vth_perp = 1.2 - pert = perturbations.ModesCos(ls=(mode,), amps=(amp,)) + maxw_params = {"n": n, "vth_perp": vth_perp} + pert_params = {"vth_perp": {"ModesCos": {"given_in_basis": "0", "ls": [mode], "amps": [amp]}}} - maxwellian = GyroMaxwellian2D( - n=(2.0, None), - vth_perp=(vth_perp, pert), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, pert_params=pert_params, volume_form=False) - meshgrids = xp.meshgrid( + meshgrids = np.meshgrid( e1, [0.0], [0.0], @@ -994,9 +967,9 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): ) res = maxwellian(*meshgrids).squeeze() - thermal 
= vth_perp + amp * xp.cos(2 * xp.pi * mode * e1) - ana_res = xp.exp(-(v2**2) / (2.0 * thermal[:, None] ** 2)) - ana_res *= n / ((2 * xp.pi) ** (1 / 2) * thermal[:, None] ** 2) + thermal = vth_perp + amp * np.cos(2 * np.pi * mode * e1) + ana_res = np.exp(-(v2**2) / (2.0 * thermal[:, None] ** 2)) + ana_res *= n / ((2 * np.pi) ** (1 / 2) * thermal[:, None] ** 2) if show_plot: plt.figure(1) @@ -1017,23 +990,31 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" # ============================================= # ===== Test ITPA perturbation in density ===== # ============================================= n0 = 0.00720655 c = [0.491230, 0.298228, 0.198739, 0.521298] - pert = perturbations.ITPA_density(n0=n0, c=c) + maxw_params = { + "n": { + "ITPA_density": { + "given_in_basis": "0", + "n0": n0, + "c": c, + } + } + } - maxwellian = GyroMaxwellian2D(n=(0.0, pert), volume_form=False) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, volume_form=False) v_perp = 0.1 - meshgrids = xp.meshgrid(e1, [0.0], [0.0], [0.0], v_perp) + meshgrids = np.meshgrid(e1, [0.0], [0.0], [0.0], v_perp) res = maxwellian(*meshgrids).squeeze() - ana_res = n0 * c[3] * xp.exp(-c[2] / c[1] * xp.tanh((e1 - c[0]) / c[2])) / (2 * xp.pi) ** (1 / 2) - ana_res *= xp.exp(-(v_perp**2) / 2.0) + ana_res = n0 * c[3] * np.exp(-c[2] / c[1] * np.tanh((e1 - c[0]) / c[2])) / (2 * np.pi) ** (1 / 2) + ana_res *= np.exp(-(v_perp**2) / 2.0) if show_plot: plt.plot(e1, ana_res, label="analytical") @@ -1044,7 +1025,7 @@ def test_maxwellian_2d_perturbed(Nel, show_plot=False): plt.ylabel("f(eta_1)") plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" @pytest.mark.parametrize("Nel", [[8, 12, 12]]) @@ -1053,85 +1034,88 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): import inspect - import cunumpy as xp import matplotlib.pyplot as plt from struphy.fields_background import equils from struphy.fields_background.base import FluidEquilibriumWithB from struphy.geometry import domains from struphy.initial import perturbations - from struphy.initial.base import Perturbation from struphy.kinetic_background.maxwellians import GyroMaxwellian2D + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + maxw_params_mhd = { + "n": "fluid_background", + "u_para": "fluid_background", + "vth_para": "fluid_background", + "vth_perp": "fluid_background", + } + + maxw_params_1 = { + "n": 1.0, + "u_para": "fluid_background", + "vth_para": "fluid_background", + "vth_perp": "fluid_background", + } + + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) v1 = [0.0] v2 = [0.0, 2.0] - meshgrids = xp.meshgrid(e1, e2, e3, v1, v2, indexing="ij") - e_meshgrids = xp.meshgrid(e1, e2, e3, indexing="ij") + meshgrids = np.meshgrid(e1, e2, e3, v1, v2, indexing="ij") + e_meshgrids = np.meshgrid(e1, e2, e3, indexing="ij") n_mks = 17 - e1_fl = xp.random.rand(n_mks) - e2_fl = xp.random.rand(n_mks) - e3_fl = xp.random.rand(n_mks) - v1_fl = xp.random.randn(n_mks) - v2_fl = xp.random.rand(n_mks) + e1_fl = np.random.rand(n_mks) + e2_fl = np.random.rand(n_mks) + e3_fl = np.random.rand(n_mks) + v1_fl = np.random.randn(n_mks) + v2_fl = np.random.rand(n_mks) args_fl = 
[e1_fl, e2_fl, e3_fl, v1_fl, v2_fl] - e_args_fl = xp.concatenate((e1_fl[:, None], e2_fl[:, None], e3_fl[:, None]), axis=1) + e_args_fl = np.concatenate((e1_fl[:, None], e2_fl[:, None], e3_fl[:, None]), axis=1) for key, val in inspect.getmembers(equils): if inspect.isclass(val) and val.__module__ == equils.__name__: - print(f"{key =}") + print(f"{key = }") if "DESCequilibrium" in key and not with_desc: - print(f"Attention: {with_desc =}, DESC not tested here !!") + print(f"Attention: {with_desc = }, DESC not tested here !!") continue if "GVECequilibrium" in key: - print("Attention: flat (marker) evaluation not tested for GVEC at the moment.") + print(f"Attention: flat (marker) evaluation not tested for GVEC at the moment.") mhd_equil = val() if not isinstance(mhd_equil, FluidEquilibriumWithB): continue - print(f"{mhd_equil.params =}") + print(f"{mhd_equil.params = }") if "AdhocTorus" in key: mhd_equil.domain = domains.HollowTorus( - a1=1e-3, - a2=mhd_equil.params["a"], - R0=mhd_equil.params["R0"], - tor_period=1, + a1=1e-3, a2=mhd_equil.params["a"], R0=mhd_equil.params["R0"], tor_period=1 ) elif "EQDSKequilibrium" in key: mhd_equil.domain = domains.Tokamak(equilibrium=mhd_equil) elif "CircularTokamak" in key: mhd_equil.domain = domains.HollowTorus( - a1=1e-3, - a2=mhd_equil.params["a"], - R0=mhd_equil.params["R0"], - tor_period=1, + a1=1e-3, a2=mhd_equil.params["a"], R0=mhd_equil.params["R0"], tor_period=1 ) elif "HomogenSlab" in key: mhd_equil.domain = domains.Cuboid() elif "ShearedSlab" in key: mhd_equil.domain = domains.Cuboid( r1=mhd_equil.params["a"], - r2=mhd_equil.params["a"] * 2 * xp.pi, - r3=mhd_equil.params["R0"] * 2 * xp.pi, + r2=mhd_equil.params["a"] * 2 * np.pi, + r3=mhd_equil.params["R0"] * 2 * np.pi, ) elif "ShearFluid" in key: mhd_equil.domain = domains.Cuboid( - r1=mhd_equil.params["a"], - r2=mhd_equil.params["b"], - r3=mhd_equil.params["c"], + r1=mhd_equil.params["a"], r2=mhd_equil.params["b"], r3=mhd_equil.params["c"] ) elif "ScrewPinch" in key: mhd_equil.domain = domains.HollowCylinder( - a1=1e-3, - a2=mhd_equil.params["a"], - Lz=mhd_equil.params["R0"] * 2 * xp.pi, + a1=1e-3, a2=mhd_equil.params["a"], Lz=mhd_equil.params["R0"] * 2 * np.pi ) else: try: @@ -1139,74 +1123,56 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): except: print(f"Not setting domain for {key}.") - maxwellian = GyroMaxwellian2D( - n=(mhd_equil.n0, None), - u_para=(mhd_equil.u_para0, None), - vth_para=(mhd_equil.vth0, None), - vth_perp=(mhd_equil.vth0, None), - volume_form=False, - ) + maxwellian = GyroMaxwellian2D(maxw_params=maxw_params_mhd, equil=mhd_equil, volume_form=False) - maxwellian_1 = GyroMaxwellian2D( - n=(1.0, None), - u_para=(mhd_equil.u_para0, None), - vth_para=(mhd_equil.vth0, None), - vth_perp=(mhd_equil.vth0, None), - volume_form=False, - ) + maxwellian_1 = GyroMaxwellian2D(maxw_params=maxw_params_1, equil=mhd_equil, volume_form=False) # test meshgrid evaluation n0 = mhd_equil.n0(*e_meshgrids) - assert xp.allclose(maxwellian(*meshgrids)[:, :, :, 0, 0], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 0]) + assert np.allclose(maxwellian(*meshgrids)[:, :, :, 0, 0], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 0]) - assert xp.allclose(maxwellian(*meshgrids)[:, :, :, 0, 1], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 1]) + assert np.allclose(maxwellian(*meshgrids)[:, :, :, 0, 1], n0 * maxwellian_1(*meshgrids)[:, :, :, 0, 1]) # test flat evaluation if "GVECequilibrium" in key: pass else: - assert xp.allclose(maxwellian(*args_fl), mhd_equil.n0(e_args_fl) * maxwellian_1(*args_fl)) - 
assert xp.allclose(maxwellian.n(e1_fl, e2_fl, e3_fl), mhd_equil.n0(e_args_fl)) + assert np.allclose(maxwellian(*args_fl), mhd_equil.n0(e_args_fl) * maxwellian_1(*args_fl)) + assert np.allclose(maxwellian.n(e1_fl, e2_fl, e3_fl), mhd_equil.n0(e_args_fl)) u_maxw = maxwellian.u(e1_fl, e2_fl, e3_fl) tmp_jv = mhd_equil.jv(e_args_fl) / mhd_equil.n0(e_args_fl) tmp_unit_b1 = mhd_equil.unit_b1(e_args_fl) # j_parallel = jv.b1 j_para = sum([ji * bi for ji, bi in zip(tmp_jv, tmp_unit_b1)]) - assert xp.allclose(u_maxw[0], j_para) + assert np.allclose(u_maxw[0], j_para) vth_maxw = maxwellian.vth(e1_fl, e2_fl, e3_fl) - vth_eq = xp.sqrt(mhd_equil.p0(e_args_fl) / mhd_equil.n0(e_args_fl)) - assert all([xp.allclose(v, vth_eq) for v in vth_maxw]) + vth_eq = np.sqrt(mhd_equil.p0(e_args_fl) / mhd_equil.n0(e_args_fl)) + assert all([np.allclose(v, vth_eq) for v in vth_maxw]) # plotting moments if show_plot: - plt.figure(f"{mhd_equil =}", figsize=(24, 16)) + plt.figure(f"{mhd_equil = }", figsize=(24, 16)) x, y, z = mhd_equil.domain(*e_meshgrids) # density plots n_cart = mhd_equil.domain.push(maxwellian.n, *e_meshgrids) - levels = xp.linspace(xp.min(n_cart) - 1e-10, xp.max(n_cart), 20) + levels = np.linspace(np.min(n_cart) - 1e-10, np.max(n_cart), 20) plt.subplot(2, 4, 1) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2 - 1, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2 - 1, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2 - 1, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2 - 1, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") @@ -1229,7 +1195,7 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): # velocity plots us = maxwellian.u(*e_meshgrids) for i, u in enumerate(us[:1]): - levels = xp.linspace(xp.min(u) - 1e-10, xp.max(u), 20) + levels = np.linspace(np.min(u) - 1e-10, np.max(u), 20) plt.subplot(2, 4, 2 + i) if "Slab" in key or "Pinch" in key: @@ -1262,32 +1228,26 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): vth = maxwellian.vth(*e_meshgrids)[0] vth_cart = mhd_equil.domain.push(vth, *e_meshgrids) - levels = xp.linspace(xp.min(vth_cart) - 1e-10, xp.max(vth_cart), 20) + levels = np.linspace(np.min(vth_cart) - 1e-10, np.max(vth_cart), 20) plt.subplot(2, 4, 4) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], vth_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2 - 1, :], - vth_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2 - 1, :], vth_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], vth_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2 - 1, :], - vth_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2 - 1, :], vth_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian thermal velocity $v_t$, top view (e1-e3)") + plt.title(f"Maxwellian thermal velocity $v_t$, top view (e1-e3)") plt.subplot(2, 4, 8) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, :, 0], y[:, :, 0], vth_cart[:, :, 0], 
levels=levels) @@ -1299,28 +1259,30 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("z") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian density $v_t$, poloidal view (e1-e2)") + plt.title(f"Maxwellian thermal velocity $v_t$, poloidal view (e1-e2)") plt.show() # test perturbations if "EQDSKequilibrium" in key: + maxw_params_zero = {"n": 0.0, "vth_para": 0.0, "vth_perp": 0.0} + for key_2, val_2 in inspect.getmembers(perturbations): - if inspect.isclass(val_2) and val_2.__module__ == perturbations.__name__: + if inspect.isclass(val_2): + print(f"{key_2 = }") pert = val_2() - print(f"{pert =}") - assert isinstance(pert, Perturbation) - - if isinstance(pert, perturbations.Noise): - continue + print(f"{pert = }") + pert_params = { + "n": {key_2: {"given_in_basis": "0"}}, + "u_para": {key_2: {"given_in_basis": "0"}}, + "u_perp": {key_2: {"given_in_basis": "0"}}, + "vth_para": {key_2: {"given_in_basis": "0"}}, + "vth_perp": {key_2: {"given_in_basis": "0"}}, + } # background + perturbation maxwellian_perturbed = GyroMaxwellian2D( - n=(mhd_equil.n0, pert), - u_para=(mhd_equil.u_para0, pert), - vth_para=(mhd_equil.vth0, pert), - vth_perp=(mhd_equil.vth0, pert), - volume_form=False, + maxw_params=maxw_params_mhd, pert_params=pert_params, equil=mhd_equil, volume_form=False ) # test meshgrid evaluation @@ -1331,19 +1293,17 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): # pure perturbation maxwellian_zero_bckgr = GyroMaxwellian2D( - n=(0.0, pert), - u_para=(0.0, pert), - u_perp=(0.0, pert), - vth_para=(0.0, pert), - vth_perp=(0.0, pert), + maxw_params=maxw_params_zero, + pert_params=pert_params, + equil=mhd_equil, volume_form=False, ) - assert xp.allclose(maxwellian_zero_bckgr.n(*e_meshgrids), pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[0], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[1], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[0], pert(*e_meshgrids)) - assert xp.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[1], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.n(*e_meshgrids), pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[0], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.u(*e_meshgrids)[1], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[0], pert(*e_meshgrids)) + assert np.allclose(maxwellian_zero_bckgr.vth(*e_meshgrids)[1], pert(*e_meshgrids)) # plotting perturbations if show_plot and "EQDSKequilibrium" in key: # and 'Torus' in key_2: @@ -1353,26 +1313,20 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): # density plots n_cart = mhd_equil.domain.push(maxwellian_zero_bckgr.n, *e_meshgrids) - levels = xp.linspace(xp.min(n_cart) - 1e-10, xp.max(n_cart), 20) + levels = np.linspace(np.min(n_cart) - 1e-10, np.max(n_cart), 20) plt.subplot(2, 4, 1) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2, :], n_cart[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], n_cart[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2, :], - n_cart[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2, :], n_cart[:, Nel[1] // 2, :], levels=levels )
plt.xlabel("x") plt.ylabel("y") @@ -1395,26 +1349,20 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): # velocity plots us = maxwellian_zero_bckgr.u(*e_meshgrids) for i, u in enumerate(us): - levels = xp.linspace(xp.min(u) - 1e-10, xp.max(u), 20) + levels = np.linspace(np.min(u) - 1e-10, np.max(u), 20) plt.subplot(2, 4, 2 + i) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, 0, :], z[:, 0, :], u[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - z[:, Nel[1] // 2, :], - u[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], z[:, Nel[1] // 2, :], u[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("z") else: plt.contourf(x[:, 0, :], y[:, 0, :], u[:, 0, :], levels=levels) plt.contourf( - x[:, Nel[1] // 2, :], - y[:, Nel[1] // 2, :], - u[:, Nel[1] // 2, :], - levels=levels, + x[:, Nel[1] // 2, :], y[:, Nel[1] // 2, :], u[:, Nel[1] // 2, :], levels=levels ) plt.xlabel("x") plt.ylabel("y") @@ -1438,7 +1386,7 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): vth = maxwellian_zero_bckgr.vth(*e_meshgrids)[0] vth_cart = mhd_equil.domain.push(vth, *e_meshgrids) - levels = xp.linspace(xp.min(vth_cart) - 1e-10, xp.max(vth_cart), 20) + levels = np.linspace(np.min(vth_cart) - 1e-10, np.max(vth_cart), 20) plt.subplot(2, 4, 4) if "Slab" in key or "Pinch" in key: @@ -1463,7 +1411,7 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("y") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian perturbed thermal velocity $v_t$, top view (e1-e3)") + plt.title(f"Maxwellian perturbed thermal velocity $v_t$, top view (e1-e3)") plt.subplot(2, 4, 8) if "Slab" in key or "Pinch" in key: plt.contourf(x[:, :, 0], y[:, :, 0], vth_cart[:, :, 0], levels=levels) @@ -1475,7 +1423,7 @@ def test_maxwellian_2d_mhd(Nel, with_desc, show_plot=False): plt.ylabel("z") plt.axis("equal") plt.colorbar() - plt.title("Maxwellian perturbed density $v_t$, poloidal view (e1-e2)") + plt.title(f"Maxwellian perturbed thermal velocity $v_t$, poloidal view (e1-e2)") plt.show() @@ -1487,19 +1435,18 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): Asserts that the results over the domain and velocity space correspond to the analytical computation. 
""" - import cunumpy as xp import matplotlib.pyplot as plt from struphy.fields_background import equils from struphy.geometry import domains - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import CanonicalMaxwellian + from struphy.utils.arrays import xp as np - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) - eta_meshgrid = xp.meshgrid(e1, e2, e3) + eta_meshgrid = np.meshgrid(e1, e2, e3) v_para = 0.01 v_perp = 0.01 @@ -1546,28 +1493,28 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): psi = mhd_equil.psi_r(r) psic = psi - epsilon * B0 * R0 / absB * v_para - psic += epsilon * xp.sign(v_para) * xp.sqrt(2 * (energy - mu * B0)) * R0 * xp.heaviside(energy - mu * B0, 0) + psic += epsilon * np.sign(v_para) * np.sqrt(2 * (energy - mu * B0)) * R0 * np.heaviside(energy - mu * B0, 0) # =========================================================== # ===== Test uniform, isothermal canonical Maxwellian ===== # =========================================================== maxw_params = {"n": 2.0, "vth": 1.0} - maxwellian = CanonicalMaxwellian(n=(2.0, None), vth=(1.0, None)) + maxwellian = CanonicalMaxwellian(maxw_params=maxw_params) # Test constant value at v_para = v_perp = 0.01 res = maxwellian(energy, mu, psic).squeeze() res_ana = ( maxw_params["n"] * 2 - * xp.sqrt(energy / xp.pi) + * np.sqrt(energy / np.pi) / maxw_params["vth"] ** 3 - * xp.exp(-energy / maxw_params["vth"] ** 2) + * np.exp(-energy / maxw_params["vth"] ** 2) ) - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" # test canonical Maxwellian profile in v_para - v_para = xp.linspace(-5, 5, 64) + v_para = np.linspace(-5, 5, 64) v_perp = 0.1 absB = mhd_equil.absB0(0.0, 0.0, 0.0)[0, 0, 0] @@ -1584,18 +1531,18 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): psi = mhd_equil.psi_r(r) psic = psi - epsilon * B0 * R0 / absB * v_para - psic += epsilon * xp.sign(v_para) * xp.sqrt(2 * (energy - mu * B0)) * R0 * xp.heaviside(energy - mu * B0, 0) + psic += epsilon * np.sign(v_para) * np.sqrt(2 * (energy - mu * B0)) * R0 * np.heaviside(energy - mu * B0, 0) - com_meshgrids = xp.meshgrid(energy, mu, psic) + com_meshgrids = np.meshgrid(energy, mu, psic) res = maxwellian(*com_meshgrids).squeeze() res_ana = ( maxw_params["n"] * 2 - * xp.sqrt(com_meshgrids[0] / xp.pi) + * np.sqrt(com_meshgrids[0] / np.pi) / maxw_params["vth"] ** 3 - * xp.exp(-com_meshgrids[0] / maxw_params["vth"] ** 2) + * np.exp(-com_meshgrids[0] / maxw_params["vth"] ** 2) ) if show_plot: @@ -1607,11 +1554,11 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): plt.xlabel("v_para") plt.show() - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" # test canonical Maxwellian profile in v_perp v_para = 0.1 - v_perp = xp.linspace(0, 2.5, 64) + v_perp = np.linspace(0, 2.5, 64) absB = mhd_equil.absB0(0.5, 0.5, 0.5)[0, 0, 0] @@ -1627,18 +1574,18 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): psi = mhd_equil.psi_r(r) psic = psi - epsilon * B0 * R0 / absB * v_para - psic += epsilon * xp.sign(v_para) * xp.sqrt(2 * (energy - mu * B0)) * R0 * xp.heaviside(energy - mu * B0, 0) + psic += epsilon * np.sign(v_para) * np.sqrt(2 * (energy - mu * B0)) * R0 * 
np.heaviside(energy - mu * B0, 0) - com_meshgrids = xp.meshgrid(energy, mu, psic) + com_meshgrids = np.meshgrid(energy, mu, psic) res = maxwellian(*com_meshgrids).squeeze() res_ana = ( maxw_params["n"] * 2 - * xp.sqrt(com_meshgrids[0] / xp.pi) + * np.sqrt(com_meshgrids[0] / np.pi) / maxw_params["vth"] ** 3 - * xp.exp(-com_meshgrids[0] / maxw_params["vth"] ** 2) + * np.exp(-com_meshgrids[0] / maxw_params["vth"] ** 2) ) if show_plot: @@ -1650,7 +1597,7 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): plt.xlabel("v_perp") plt.show() - assert xp.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" + assert np.allclose(res, res_ana, atol=10e-10), f"{res=},\n {res_ana}" # ============================================= # ===== Test ITPA perturbation in density ===== @@ -1661,15 +1608,14 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): "n": {"ITPA_density": {"n0": n0, "c": c}}, "vth": 1.0, } - pert = perturbations.ITPA_density(n0=n0, c=c) - maxwellian = CanonicalMaxwellian(n=(0.0, pert), equil=mhd_equil, volume_form=False) + maxwellian = CanonicalMaxwellian(maxw_params=maxw_params, equil=mhd_equil) - e1 = xp.linspace(0.0, 1.0, Nel[0]) - e2 = xp.linspace(0.0, 1.0, Nel[1]) - e3 = xp.linspace(0.0, 1.0, Nel[2]) + e1 = np.linspace(0.0, 1.0, Nel[0]) + e2 = np.linspace(0.0, 1.0, Nel[1]) + e3 = np.linspace(0.0, 1.0, Nel[2]) - eta_meshgrid = xp.meshgrid(e1, e2, e3) + eta_meshgrid = np.meshgrid(e1, e2, e3) v_para = 0.01 v_perp = 0.01 @@ -1688,16 +1634,16 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): psi = mhd_equil.psi_r(r[0, :, 0]) psic = psi - epsilon * B0 * R0 / absB * v_para - psic += epsilon * xp.sign(v_para) * xp.sqrt(2 * (energy - mu * B0)) * R0 * xp.heaviside(energy - mu * B0, 0) + psic += epsilon * np.sign(v_para) * np.sqrt(2 * (energy - mu * B0)) * R0 * np.heaviside(energy - mu * B0, 0) - com_meshgrids = xp.meshgrid(energy, mu, psic) + com_meshgrids = np.meshgrid(energy, mu, psic) res = maxwellian(energy, mu, psic).squeeze() # calculate rc rc = maxwellian.rc(psic) - ana_res = n0 * c[3] * xp.exp(-c[2] / c[1] * xp.tanh((rc - c[0]) / c[2])) - ana_res *= 2 * xp.sqrt(energy / xp.pi) / maxw_params["vth"] ** 3 * xp.exp(-energy / maxw_params["vth"] ** 2) + ana_res = n0 * c[3] * np.exp(-c[2] / c[1] * np.tanh((rc - c[0]) / c[2])) + ana_res *= 2 * np.sqrt(energy / np.pi) / maxw_params["vth"] ** 3 * np.exp(-energy / maxw_params["vth"] ** 2) if show_plot: plt.plot(e1, ana_res, label="analytical") @@ -1708,14 +1654,14 @@ def test_canonical_maxwellian_uniform(Nel, show_plot=False): plt.ylabel("f(eta_1)") plt.show() - assert xp.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" + assert np.allclose(res, ana_res, atol=10e-10), f"{res=},\n {ana_res}" if __name__ == "__main__": - # test_maxwellian_3d_uniform(Nel=[64, 1, 1], show_plot=True) - # test_maxwellian_3d_perturbed(Nel=[64, 1, 1], show_plot=True) + # test_maxwellian_3d_uniform(Nel=[64, 1, 1], show_plot=False) + # test_maxwellian_3d_perturbed(Nel=[64, 1, 1], show_plot=False) # test_maxwellian_3d_mhd(Nel=[8, 11, 12], with_desc=None, show_plot=False) # test_maxwellian_2d_uniform(Nel=[64, 1, 1], show_plot=True) # test_maxwellian_2d_perturbed(Nel=[64, 1, 1], show_plot=True) - # test_maxwellian_2d_mhd(Nel=[8, 12, 12], with_desc=None, show_plot=False) - test_canonical_maxwellian_uniform(Nel=[64, 1, 1], show_plot=True) + test_maxwellian_2d_mhd(Nel=[8, 12, 12], with_desc=None, show_plot=False) + # test_canonical_maxwellian_uniform(Nel=[64, 1, 1], show_plot=True)
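Taken together, the hunks in this test file implement a single migration: Maxwellian backgrounds are now built from nested parameter dicts (maxw_params, pert_params) instead of (background, Perturbation) tuples. Condensed from the ITPA hunk above into a standalone sketch (class name, keyword names and values are taken from the test itself; treat it as an illustration, not as the full API):

from struphy.kinetic_background.maxwellians import GyroMaxwellian2D
from struphy.utils.arrays import xp as np

# ITPA density profile as a nested parameter dict (values from the test above)
n0 = 0.00720655
c = [0.491230, 0.298228, 0.198739, 0.521298]
maxw_params = {"n": {"ITPA_density": {"given_in_basis": "0", "n0": n0, "c": c}}}

maxwellian = GyroMaxwellian2D(maxw_params=maxw_params, volume_form=False)

# evaluate f(eta1, eta2, eta3, v_para, v_perp) on a meshgrid, as in the tests
e1 = np.linspace(0.0, 1.0, 64)
meshgrids = np.meshgrid(e1, [0.0], [0.0], [0.0], [0.1])
res = maxwellian(*meshgrids).squeeze()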
diff --git a/src/struphy/linear_algebra/linalg_kron.py b/src/struphy/linear_algebra/linalg_kron.py index fedd05979..68e2513f7 100644 --- a/src/struphy/linear_algebra/linalg_kron.py +++ b/src/struphy/linear_algebra/linalg_kron.py @@ -13,10 +13,11 @@ [r_M11, rM12, ... , r_MNO]] """ -import cunumpy as xp from scipy.linalg import solve_circulant from scipy.sparse.linalg import splu +from struphy.utils.arrays import xp as np + def kron_matvec_2d(kmat, vec2d): """ @@ -82,9 +83,8 @@ def kron_matvec_3d(kmat, vec3d): ( kmat[2].dot( ((kmat[1].dot(((kmat[0].dot(vec3d.reshape(v0, v1 * v2))).T).reshape(v1, v2 * k0))).T).reshape( - v2, - k0 * k1, - ), + v2, k0 * k1 + ) ) ).T ).reshape(k0, k1, k2) @@ -197,9 +197,9 @@ def kron_matmat_fft_3d(a_vec, b_vec): c_vec = [0, 0, 0] - c_vec[0] = xp.fft.ifft(xp.fft.fft(a_vec[0]) * xp.fft.fft(b_vec[0])) - c_vec[1] = xp.fft.ifft(xp.fft.fft(a_vec[1]) * xp.fft.fft(b_vec[1])) - c_vec[2] = xp.fft.ifft(xp.fft.fft(a_vec[2]) * xp.fft.fft(b_vec[2])) + c_vec[0] = np.fft.ifft(np.fft.fft(a_vec[0]) * np.fft.fft(b_vec[0])) + c_vec[1] = np.fft.ifft(np.fft.fft(a_vec[1]) * np.fft.fft(b_vec[1])) + c_vec[2] = np.fft.ifft(np.fft.fft(a_vec[2]) * np.fft.fft(b_vec[2])) return c_vec @@ -279,9 +279,8 @@ def kron_lusolve_3d(kmatlu, rhs): ( kmatlu[2].solve( ((kmatlu[1].solve(((kmatlu[0].solve(rhs.reshape(r0, r1 * r2))).T).reshape(r1, r2 * r0))).T).reshape( - r2, - r0 * r1, - ), + r2, r0 * r1 + ) ) ).T ).reshape(r0, r1, r2) @@ -322,7 +321,7 @@ def kron_solve_3d(kmat, rhs): splu(kmat[2]).solve( ( (splu(kmat[1]).solve(((splu(kmat[0]).solve(rhs.reshape(r0, r1 * r2))).T).reshape(r1, r2 * r0))).T - ).reshape(r2, r0 * r1), + ).reshape(r2, r0 * r1) ) ).T ).reshape(r0, r1, r2) @@ -363,8 +362,7 @@ def kron_fftsolve_3d(cvec, rhs): ( ( solve_circulant( - cvec[1], - ((solve_circulant(cvec[0], rhs.reshape(r0, r1 * r2))).T).reshape(r1, r2 * r0), + cvec[1], ((solve_circulant(cvec[0], rhs.reshape(r0, r1 * r2))).T).reshape(r1, r2 * r0) ) ).T ).reshape(r2, r0 * r1), diff --git a/src/struphy/linear_algebra/saddle_point.py b/src/struphy/linear_algebra/saddle_point.py index 337664754..5da783b3a 100644 --- a/src/struphy/linear_algebra/saddle_point.py +++ b/src/struphy/linear_algebra/saddle_point.py @@ -1,6 +1,5 @@ from typing import Union -import cunumpy as xp import scipy as sc from psydac.linalg.basic import LinearOperator, Vector from psydac.linalg.block import BlockLinearOperator, BlockVector, BlockVectorSpace @@ -8,6 +7,7 @@ from psydac.linalg.solvers import inverse from struphy.linear_algebra.tests.test_saddlepoint_massmatrices import _plot_residual_norms +from struphy.utils.arrays import xp as np class SaddlePointSolver: @@ -28,7 +28,7 @@ class SaddlePointSolver: } \right) using either the Uzawa iteration :math:`BA^{-1}B^{\top} y = BA^{-1} f` or using one of the solvers given in :mod:`psydac.linalg.solvers`. The preferred solver is GMRES. - The decission which variant to use is given by the type of A. If A is of type list of xp.ndarrays or sc.sparse.csr_matrices, then this class uses the Uzawa algorithm. + The decision which variant to use is given by the type of A. If A is of type list of np.ndarrays or sc.sparse.csr_matrices, then this class uses the Uzawa algorithm. If A is of type LinearOperator or BlockLinearOperator, a solver is used for the inverse.
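For orientation, a minimal dense-NumPy sketch of the Uzawa variant (illustrative only: a single A block, an explicit inverse and a unit pressure step, whereas the class below works on block systems, supports preconditioners and uses a residual-based stopping rule; A, B, f as in the system above):

import numpy as np

def uzawa_sketch(A, B, f, n_iter=200, tol=1e-10):
    # Solve [[A, B^T], [B, 0]] @ (u, y) = (f, 0) by Uzawa iteration.
    u = np.zeros(A.shape[1])
    y = np.zeros(B.shape[0])
    A_inv = np.linalg.inv(A)  # stand-in for the solvers/preconditioners used below
    for _ in range(n_iter):
        u = A_inv @ (f - B.T @ y)  # velocity update: solve A u = f - B^T y
        r = B @ u                  # residual r = B u
        if np.linalg.norm(r) < tol:
            break
        y = y + r                  # pressure update (unit step size for brevity)
    return u, y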
Using the Uzawa algorithm, solution is given by: @@ -41,7 +41,7 @@ class SaddlePointSolver: ---------- A : list, LinearOperator or BlockLinearOperator Upper left block. - Either the entries on the diagonals of block A are given as list of xp.ndarray or sc.sparse.csr_matrix. + Either the entries on the diagonals of block A are given as list of np.ndarray or sc.sparse.csr_matrix. Alternative: Give whole matrix A as LinearOperator or BlockLinearOperator. list: Uzawa algorithm is used. LinearOperator: A solver given in :mod:`psydac.linalg.solvers` is used. Specified by solver_name. @@ -49,16 +49,16 @@ class SaddlePointSolver: B : list, LinearOperator or BlockLinearOperator Lower left block. - Uzwaw Algorithm: All entries of block B are given either as list of xp.ndarray or sc.sparse.csr_matrix. + Uzawa algorithm: All entries of block B are given either as list of np.ndarray or sc.sparse.csr_matrix. Solver: Give whole B as LinearOperator or BlockLinearOperator F : list Right hand side of the upper block. - Uzawa: Given as list of xp.ndarray or sc.sparse.csr_matrix. + Uzawa: Given as list of np.ndarray or sc.sparse.csr_matrix. Solver: Given as LinearOperator or BlockLinearOperator Apre : list - The non-inverted preconditioner for entries on the diagonals of block A are given as list of xp.ndarray or sc.sparse.csr_matrix. Only required for the Uzawa algorithm. + The non-inverted preconditioner for entries on the diagonals of block A are given as list of np.ndarray or sc.sparse.csr_matrix. Only required for the Uzawa algorithm. method_to_solve : str Method for the inverses. Choose from 'DirectNPInverse', 'ScipySparse', 'InexactNPInverse', 'SparseSolver'. Only required for the Uzawa algorithm. @@ -98,14 +98,14 @@ def __init__( if isinstance(A, list): self._variant = "Uzawa" for i in A: - assert isinstance(i, xp.ndarray) or isinstance(i, sc.sparse.csr_matrix) + assert isinstance(i, np.ndarray) or isinstance(i, sc.sparse.csr_matrix) for i in B: - assert isinstance(i, xp.ndarray) or isinstance(i, sc.sparse.csr_matrix) + assert isinstance(i, np.ndarray) or isinstance(i, sc.sparse.csr_matrix) for i in F: - assert isinstance(i, xp.ndarray) or isinstance(i, sc.sparse.csr_matrix) + assert isinstance(i, np.ndarray) or isinstance(i, sc.sparse.csr_matrix) for i in Apre: assert ( - isinstance(i, xp.ndarray) + isinstance(i, np.ndarray) or isinstance(i, sc.sparse.csr_matrix) or isinstance(i, sc.sparse.csr_array) ) @@ -169,9 +169,9 @@ def __init__( self._setup_inverses() # Solution vectors numpy - self._Pnp = xp.zeros(self._B1np.shape[0]) - self._Unp = xp.zeros(self._A[0].shape[1]) - self._Uenp = xp.zeros(self._A[1].shape[1]) + self._Pnp = np.zeros(self._B1np.shape[0]) + self._Unp = np.zeros(self._A[0].shape[1]) + self._Uenp = np.zeros(self._A[1].shape[1]) # Allocate memory for matrices used in solving the system self._rhs0np = self._F[0].copy() self._rhs1np = self._F[1].copy() @@ -195,8 +195,8 @@ def A(self, a): same_A0 = (A0_old != A0_new).nnz == 0 same_A1 = (A1_old != A1_new).nnz == 0 else: - same_A0 = xp.allclose(A0_old, A0_new, atol=1e-10) - same_A1 = xp.allclose(A1_old, A1_new, atol=1e-10) + same_A0 = np.allclose(A0_old, A0_new, atol=1e-10) + same_A1 = np.allclose(A1_old, A1_new, atol=1e-10) if same_A0 and same_A1: need_update = False self._A = a @@ -240,8 +240,8 @@ def Apre(self, a): same_A0 = (A0_old != A0_new).nnz == 0 same_A1 = (A1_old != A1_new).nnz == 0 else: - same_A0 = xp.allclose(A0_old, A0_new, atol=1e-10) - same_A1 = xp.allclose(A1_old, A1_new, atol=1e-10) + same_A0 = np.allclose(A0_old, A0_new, atol=1e-10) + same_A1 = np.allclose(A1_old, A1_new, atol=1e-10) if same_A0 and same_A1: need_update = False self._Apre = a @@ -256,11 +256,11 @@ def __call__(self, 
U_init=None, Ue_init=None, P_init=None, out=None): Parameters ---------- - U_init : Vector, xp.ndarray or sc.sparse.csr.csr_matrix, optional - Initial guess for the velocity of the ions. If None, initializes to zero. Types xp.ndarray and sc.sparse.csr.csr_matrix can only be given if system should be solved with Uzawa algorithm. + U_init : Vector, np.ndarray or sc.sparse.csr.csr_matrix, optional + Initial guess for the velocity of the ions. If None, initializes to zero. Types np.ndarray and sc.sparse.csr.csr_matrix can only be given if system should be solved with Uzawa algorithm. - Ue_init : Vector, xp.ndarray or sc.sparse.csr.csr_matrix, optional - Initial guess for the velocity of the electrons. If None, initializes to zero. Types xp.ndarray and sc.sparse.csr.csr_matrix can only be given if system should be solved with Uzawa algorithm. + Ue_init : Vector, np.ndarray or sc.sparse.csr.csr_matrix, optional + Initial guess for the velocity of the electrons. If None, initializes to zero. Types np.ndarray and sc.sparse.csr.csr_matrix can only be given if system should be solved with Uzawa algorithm. P_init : Vector, optional Initial guess for the potential. If None, initializes to zero. @@ -304,13 +304,13 @@ def __call__(self, U_init=None, Ue_init=None, P_init=None, out=None): elif self._variant == "Uzawa": info = {} - if self._spectralanalysis: + if self._spectralanalysis == True: self._spectralresult = self._spectral_analysis() else: self._spectralresult = [] # Initialize P to zero or given initial guess - if isinstance(U_init, xp.ndarray) or isinstance(U_init, sc.sparse.csr.csr_matrix): + if isinstance(U_init, np.ndarray) or isinstance(U_init, sc.sparse.csr.csr_matrix): self._Pnp = P_init if P_init is not None else self._P self._Unp = U_init if U_init is not None else self._U self._Uenp = Ue_init if U_init is not None else self._Ue @@ -333,9 +333,9 @@ def __call__(self, U_init=None, Ue_init=None, P_init=None, out=None): self._rhs0np -= self._B1np.transpose().dot(self._Pnp) self._rhs0np -= self._Anp.dot(self._Unp) self._rhs0np += self._F[0] - if not self._preconditioner: + if self._preconditioner == False: self._Unp += self._Anpinv.dot(self._rhs0np) - elif self._preconditioner: + elif self._preconditioner == True: self._Unp += self._Anpinv.dot(self._A11npinv @ self._rhs0np) R1 = self._B1np.dot(self._Unp) @@ -344,17 +344,17 @@ def __call__(self, U_init=None, Ue_init=None, P_init=None, out=None): self._rhs1np -= self._B2np.transpose().dot(self._Pnp) self._rhs1np -= self._Aenp.dot(self._Uenp) self._rhs1np += self._F[1] - if not self._preconditioner: + if self._preconditioner == False: self._Uenp += self._Aenpinv.dot(self._rhs1np) - elif self._preconditioner: + elif self._preconditioner == True: self._Uenp += self._Aenpinv.dot(self._A22npinv @ self._rhs1np) R2 = self._B2np.dot(self._Uenp) # Step 2: Compute residual R = BU (divergence of U) R = R1 + R2 # self._B1np.dot(self._Unp) + self._B2np.dot(self._Uenp) - residual_norm = xp.linalg.norm(R) - residual_normR1 = xp.linalg.norm(R) + residual_norm = np.linalg.norm(R) + residual_normR1 = np.linalg.norm(R) self._residual_norms.append(residual_normR1) # Store residual norm # Check for convergence based on residual norm if residual_norm < self._tol: @@ -382,7 +382,7 @@ def __call__(self, U_init=None, Ue_init=None, P_init=None, out=None): # Return with info if maximum iterations reached info["success"] = False info["niter"] = iteration + 1 - if self._verbose: + if self._verbose == True: _plot_residual_norms(self._residual_norms) return self._Unp, 
self._Uenp, self._Pnp, info, self._residual_norms, self._spectralresult @@ -413,10 +413,7 @@ def _setup_inverses(self): # === Inverse for A[1] if hasattr(self, "_Aenpinv") and self._is_inverse_still_valid( - self._Aenpinv, - A1, - "A[1]", - pre=self._A22npinv, + self._Aenpinv, A1, "A[1]", pre=self._A22npinv ): pass else: @@ -447,10 +444,10 @@ def _is_inverse_still_valid(self, inv, mat, name="", pre=None): I_approx = inv @ test_mat if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - I_exact = xp.eye(test_mat.shape[0]) - if not xp.allclose(I_approx, I_exact, atol=1e-6): + I_exact = np.eye(test_mat.shape[0]) + if not np.allclose(I_approx, I_exact, atol=1e-6): diff = I_approx - I_exact - max_abs = xp.abs(diff).max() + max_abs = np.abs(diff).max() print(f"{name} inverse is NOT valid anymore. Max diff: {max_abs:.2e}") return False print(f"{name} inverse is still valid.") @@ -458,7 +455,7 @@ def _is_inverse_still_valid(self, inv, mat, name="", pre=None): elif self._method_to_solve == "ScipySparse": I_exact = sc.sparse.identity(I_approx.shape[0], format=I_approx.format) diff = (I_approx - I_exact).tocoo() - max_abs = xp.abs(diff.data).max() if diff.nnz > 0 else 0.0 + max_abs = np.abs(diff.data).max() if diff.nnz > 0 else 0.0 if max_abs > 1e-6: print(f"{name} inverse is NOT valid anymore.") @@ -471,12 +468,12 @@ def _is_inverse_still_valid(self, inv, mat, name="", pre=None): def _compute_inverse(self, mat, which="matrix"): print(f"Computing inverse for {which} using method {self._method_to_solve}") if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - return xp.linalg.inv(mat) + return np.linalg.inv(mat) elif self._method_to_solve == "ScipySparse": return sc.sparse.linalg.inv(mat) elif self._method_to_solve == "SparseSolver": solver = SparseSolver(mat) - return solver.solve(xp.eye(mat.shape[0])) + return solver.solve(np.eye(mat.shape[0])) else: raise ValueError(f"Unknown solver method {self._method_to_solve}") @@ -484,14 +481,14 @@ def _spectral_analysis(self): # Spectral analysis # A11 before if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - eigvalsA11_before, eigvecs_before = xp.linalg.eig(self._A[0]) - condA11_before = xp.linalg.cond(self._A[0]) + eigvalsA11_before, eigvecs_before = np.linalg.eig(self._A[0]) + condA11_before = np.linalg.cond(self._A[0]) elif self._method_to_solve in ("SparseSolver", "ScipySparse"): - eigvalsA11_before, eigvecs_before = xp.linalg.eig(self._A[0].toarray()) - condA11_before = xp.linalg.cond(self._A[0].toarray()) + eigvalsA11_before, eigvecs_before = np.linalg.eig(self._A[0].toarray()) + condA11_before = np.linalg.cond(self._A[0].toarray()) maxbeforeA11 = max(eigvalsA11_before) - maxbeforeA11_abs = xp.max(xp.abs(eigvalsA11_before)) - minbeforeA11_abs = xp.min(xp.abs(eigvalsA11_before)) + maxbeforeA11_abs = np.max(np.abs(eigvalsA11_before)) + minbeforeA11_abs = np.min(np.abs(eigvalsA11_before)) minbeforeA11 = min(eigvalsA11_before) specA11_bef = maxbeforeA11 / minbeforeA11 specA11_bef_abs = maxbeforeA11_abs / minbeforeA11_abs @@ -500,18 +497,18 @@ def _spectral_analysis(self): # print(f'{minbeforeA11_abs = }') # print(f'{minbeforeA11 = }') # print(f'{specA11_bef = }') - print(f"{specA11_bef_abs =}") + print(f"{specA11_bef_abs = }") # A22 before if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - eigvalsA22_before, eigvecs_before = xp.linalg.eig(self._A[1]) - condA22_before = xp.linalg.cond(self._A[1]) + eigvalsA22_before, eigvecs_before = np.linalg.eig(self._A[1]) + condA22_before = 
np.linalg.cond(self._A[1]) elif self._method_to_solve in ("SparseSolver", "ScipySparse"): - eigvalsA22_before, eigvecs_before = xp.linalg.eig(self._A[1].toarray()) - condA22_before = xp.linalg.cond(self._A[1].toarray()) + eigvalsA22_before, eigvecs_before = np.linalg.eig(self._A[1].toarray()) + condA22_before = np.linalg.cond(self._A[1].toarray()) maxbeforeA22 = max(eigvalsA22_before) - maxbeforeA22_abs = xp.max(xp.abs(eigvalsA22_before)) - minbeforeA22_abs = xp.min(xp.abs(eigvalsA22_before)) + maxbeforeA22_abs = np.max(np.abs(eigvalsA22_before)) + minbeforeA22_abs = np.min(np.abs(eigvalsA22_before)) minbeforeA22 = min(eigvalsA22_before) specA22_bef = maxbeforeA22 / minbeforeA22 specA22_bef_abs = maxbeforeA22_abs / minbeforeA22_abs @@ -520,19 +517,19 @@ def _spectral_analysis(self): # print(f'{minbeforeA22_abs = }') # print(f'{minbeforeA22 = }') # print(f'{specA22_bef = }') - print(f"{specA22_bef_abs =}") - print(f"{condA22_before =}") + print(f"{specA22_bef_abs = }") + print(f"{condA22_before = }") - if self._preconditioner: + if self._preconditioner == True: # A11 after preconditioning with its inverse if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - eigvalsA11_after_prec, eigvecs_after = xp.linalg.eig(self._A11npinv @ self._A[0]) # Implement this + eigvalsA11_after_prec, eigvecs_after = np.linalg.eig(self._A11npinv @ self._A[0]) # Implement this elif self._method_to_solve in ("SparseSolver", "ScipySparse"): - eigvalsA11_after_prec, eigvecs_after = xp.linalg.eig((self._A11npinv @ self._A[0]).toarray()) + eigvalsA11_after_prec, eigvecs_after = np.linalg.eig((self._A11npinv @ self._A[0]).toarray()) maxafterA11_prec = max(eigvalsA11_after_prec) minafterA11_prec = min(eigvalsA11_after_prec) - maxafterA11_abs_prec = xp.max(xp.abs(eigvalsA11_after_prec)) - minafterA11_abs_prec = xp.min(xp.abs(eigvalsA11_after_prec)) + maxafterA11_abs_prec = np.max(np.abs(eigvalsA11_after_prec)) + minafterA11_abs_prec = np.min(np.abs(eigvalsA11_after_prec)) specA11_aft_prec = maxafterA11_prec / minafterA11_prec specA11_aft_abs_prec = maxafterA11_abs_prec / minafterA11_abs_prec # print(f'{maxafterA11_prec = }') @@ -540,19 +537,19 @@ def _spectral_analysis(self): # print(f'{minafterA11_abs_prec = }') # print(f'{minafterA11_prec = }') # print(f'{specA11_aft_prec = }') - print(f"{specA11_aft_abs_prec =}") + print(f"{specA11_aft_abs_prec = }") # A22 after preconditioning with its inverse if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - eigvalsA22_after_prec, eigvecs_after = xp.linalg.eig(self._A22npinv @ self._A[1]) # Implement this - condA22_after = xp.linalg.cond(self._A22npinv @ self._A[1]) + eigvalsA22_after_prec, eigvecs_after = np.linalg.eig(self._A22npinv @ self._A[1]) # Implement this + condA22_after = np.linalg.cond(self._A22npinv @ self._A[1]) elif self._method_to_solve in ("SparseSolver", "ScipySparse"): - eigvalsA22_after_prec, eigvecs_after = xp.linalg.eig((self._A22npinv @ self._A[1]).toarray()) - condA22_after = xp.linalg.cond((self._A22npinv @ self._A[1]).toarray()) + eigvalsA22_after_prec, eigvecs_after = np.linalg.eig((self._A22npinv @ self._A[1]).toarray()) + condA22_after = np.linalg.cond((self._A22npinv @ self._A[1]).toarray()) maxafterA22_prec = max(eigvalsA22_after_prec) minafterA22_prec = min(eigvalsA22_after_prec) - maxafterA22_abs_prec = xp.max(xp.abs(eigvalsA22_after_prec)) - minafterA22_abs_prec = xp.min(xp.abs(eigvalsA22_after_prec)) + maxafterA22_abs_prec = np.max(np.abs(eigvalsA22_after_prec)) + minafterA22_abs_prec = 
np.min(np.abs(eigvalsA22_after_prec)) specA22_aft_prec = maxafterA22_prec / minafterA22_prec specA22_aft_abs_prec = maxafterA22_abs_prec / minafterA22_abs_prec # print(f'{maxafterA22_prec = }') @@ -560,7 +557,7 @@ def _spectral_analysis(self): # print(f'{minafterA22_abs_prec = }') # print(f'{minafterA22_prec = }') # print(f'{specA22_aft_prec = }') - print(f"{specA22_aft_abs_prec =}") + print(f"{specA22_aft_abs_prec = }") return condA22_before, specA22_bef_abs, condA11_before, condA22_after, specA22_aft_abs_prec diff --git a/src/struphy/linear_algebra/schur_solver.py b/src/struphy/linear_algebra/schur_solver.py index dd41af54b..c29a50db8 100644 --- a/src/struphy/linear_algebra/schur_solver.py +++ b/src/struphy/linear_algebra/schur_solver.py @@ -2,8 +2,6 @@ from psydac.linalg.block import BlockLinearOperator, BlockVector from psydac.linalg.solvers import inverse -from struphy.linear_algebra.solver import SolverParameters - class SchurSolver: r"""Solves for :math:`x^{n+1}` in the block system @@ -48,23 +46,13 @@ class SchurSolver: Must correspond to the chosen solver. """ - def __init__( - self, - A: LinearOperator, - BC: LinearOperator, - solver_name: str, - precond=None, # TODO: add Preconditioner base class - solver_params: SolverParameters = None, - ): + def __init__(self, A: LinearOperator, BC: LinearOperator, solver_name: str, **solver_params): assert isinstance(A, LinearOperator) assert isinstance(BC, LinearOperator) assert A.domain == BC.domain assert A.codomain == BC.codomain - if solver_params is None: - solver_params = SolverParameters() - # linear operators self._A = A self._BC = BC @@ -76,12 +64,10 @@ def __init__( # initialize solver with dummy matrix A self._solver_name = solver_name - kwargs = solver_params.__dict__ - kwargs.pop("info") - if precond is not None: - kwargs["pc"] = precond + if solver_params.get("pc") is None: + solver_params.pop("pc", None) - self._solver = inverse(A, solver_name, **kwargs) + self._solver = inverse(A, solver_name, **solver_params) # right-hand side vector (avoids temporary memory allocation!) self._rhs = A.codomain.zeros()
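The SchurSolver hunk above reworks only the constructor; the solve itself remains a Schur-complement elimination. As a generic reminder, for a block system [[A, B], [C, I]] the elimination reads as follows (a self-contained NumPy sketch; the exact blocks and time-step scaling used by SchurSolver are defined in its full docstring, which this diff does not show):

import numpy as np

rng = np.random.default_rng(0)
n = 5
A = np.eye(n) + 0.1 * rng.random((n, n))  # upper-left block, well conditioned here
B = 0.1 * rng.random((n, n))
C = 0.1 * rng.random((n, n))
f = rng.random(n)
g = rng.random(n)

# Eliminate y = g - C @ x, then solve the Schur complement system for x:
x = np.linalg.solve(A - B @ C, f - B @ g)
y = g - C @ x

# Verify against the assembled block system
K = np.block([[A, B], [C, np.eye(n)]])
assert np.allclose(K @ np.concatenate([x, y]), np.concatenate([f, g]))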
diff --git a/src/struphy/linear_algebra/tests/test_saddle_point_propagator.py b/src/struphy/linear_algebra/tests/test_saddle_point_propagator.py index 3aa3f4ab0..94c4e7fab 100644 --- a/src/struphy/linear_algebra/tests/test_saddle_point_propagator.py +++ b/src/struphy/linear_algebra/tests/test_saddle_point_propagator.py @@ -1,12 +1,11 @@ import pytest -@pytest.mark.skip @pytest.mark.mpi_skip @pytest.mark.parametrize("Nel", [[16, 1, 1], [32, 1, 1]]) @pytest.mark.parametrize("p", [[1, 1, 1], [2, 1, 1]]) @pytest.mark.parametrize("spl_kind", [[True, True, True]]) -@pytest.mark.parametrize("dirichlet_bc", [((False, False), (False, False), (False, False))]) +@pytest.mark.parametrize("dirichlet_bc", [[[False, False], [False, False], [False, False]]]) @pytest.mark.parametrize("mapping", [["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}]]) @pytest.mark.parametrize("epsilon", [0.000000001]) @pytest.mark.parametrize("dt", [0.001]) @@ -21,8 +20,6 @@ def test_propagator1D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): from struphy.feec.utilities import compare_arrays from struphy.fields_background.equils import HomogenSlab from struphy.geometry import domains - from struphy.initial import perturbations - from struphy.models.variables import FEECVariable from struphy.propagators.propagators_fields import TwoFluidQuasiNeutralFull mpi_comm = MPI.COMM_WORLD @@ -60,57 +57,42 @@ def test_propagator1D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): bas_ops = BasisProjectionOperators(derham, domain, eq_mhd=eq_mhd) # Manufactured solutions - uvec = FEECVariable(space="Hdiv") - u_evec = FEECVariable(space="Hdiv") - potentialvec = FEECVariable(space="L2") - uinitial = FEECVariable(space="Hdiv") - - pp_u = perturbations.ManufacturedSolutionVelocity() - pp_ue = perturbations.ManufacturedSolutionVelocity(species="Electrons") - pp_potential = perturbations.ManufacturedSolutionPotential() - - # pp_u = { - # "ManufacturedSolutionVelocity": { - # "given_in_basis": ["physical", None, None], - # "species": "Ions", - # "comp": "0", - # "dimension": "1D", - # } - # } - # pp_ue = { - # "ManufacturedSolutionVelocity": { - # "given_in_basis": ["physical", None, None], - # "species": "Electrons", - # "comp": "0", - # "dimension": "1D", - # } - # } - # pp_potential = { - # "ManufacturedSolutionPotential": { - # "given_in_basis": "physical", - # "dimension": "1D", - # } - # } - - uvec.add_perturbation(pp_u) - uvec.allocate(derham, domain, eq_mhd) - - u_evec.add_perturbation(pp_ue) - u_evec.allocate(derham, domain, eq_mhd) - - potentialvec.add_perturbation(pp_potential) - potentialvec.allocate(derham, domain, eq_mhd) - - uinitial.allocate(derham, domain, eq_mhd) - - # uvec.initialize_coeffs(domain=domain, pert_params=pp_u) - # u_evec.initialize_coeffs(domain=domain, pert_params=pp_ue) - # potentialvec.initialize_coeffs(domain=domain, pert_params=pp_potential) + uvec = derham.create_spline_function("u", "Hdiv") + u_evec = derham.create_spline_function("u_e", "Hdiv") + potentialvec = derham.create_spline_function("potential", "L2") + uinitial = derham.create_spline_function("u", "Hdiv") + + pp_u = { + "ManufacturedSolutionVelocity": { + "given_in_basis": ["physical", None, None], + "species": "Ions", + "comp": "0", + "dimension": "1D", + } + } + pp_ue = { + "ManufacturedSolutionVelocity": { + "given_in_basis": ["physical", None, None], + "species": "Electrons", + "comp": "0", + "dimension": "1D", + } + } + pp_potential = { + "ManufacturedSolutionPotential": { 
+ "given_in_basis": "physical", + "dimension": "1D", + } + } + + uvec.initialize_coeffs(domain=domain, pert_params=pp_u) + u_evec.initialize_coeffs(domain=domain, pert_params=pp_ue) + potentialvec.initialize_coeffs(domain=domain, pert_params=pp_potential) # Save manufactured solution to compare it later with the outcome of the propagator - uvec_initial = uvec.spline.vector.copy() - u_evec_initial = u_evec.spline.vector.copy() - potentialvec_initial = potentialvec.spline.vector.copy() + uvec_initial = uvec.vector.copy() + u_evec_initial = u_evec.vector.copy() + potentialvec_initial = potentialvec.vector.copy() solver = {} solver["type"] = ["gmres", None] @@ -127,9 +109,9 @@ def test_propagator1D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): # Starting with initial condition u=0 and ue and phi start with manufactured solution prop = TwoFluidQuasiNeutralFull( - uinitial.spline.vector, - u_evec.spline.vector, - potentialvec.spline.vector, + uinitial.vector, + u_evec.vector, + potentialvec.vector, stab_sigma=epsilon, D1_dt=dt, variant="Uzawa", @@ -212,12 +194,11 @@ def test_propagator1D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): import pytest -@pytest.mark.skip @pytest.mark.mpi_skip @pytest.mark.parametrize("Nel", [[16, 16, 1], [32, 32, 1]]) @pytest.mark.parametrize("p", [[1, 1, 1], [2, 2, 1]]) @pytest.mark.parametrize("spl_kind", [[True, True, True]]) -@pytest.mark.parametrize("dirichlet_bc", [((False, False), (False, False), (False, False))]) +@pytest.mark.parametrize("dirichlet_bc", [[[False, False], [False, False], [False, False]]]) @pytest.mark.parametrize("mapping", [["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}]]) @pytest.mark.parametrize("epsilon", [0.001]) @pytest.mark.parametrize("dt", [0.01]) @@ -232,8 +213,7 @@ def test_propagator2D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): from struphy.feec.utilities import compare_arrays from struphy.fields_background.equils import HomogenSlab from struphy.geometry import domains - from struphy.models.variables import FEECVariable - from struphy.propagators.propagators_fields import TwoFluidQuasiNeutralFull + from struphy.propagators import TwoFluidQuasiNeutralFull mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -270,9 +250,9 @@ def test_propagator2D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): bas_ops = BasisProjectionOperators(derham, domain, eq_mhd=eq_mhd) # Manufactured solutions - uvec = FEECVariable(space="Hdiv") - u_evec = FEECVariable(space="Hdiv") - potentialvec = FEECVariable(space="L2") + uvec = derham.create_spline_function("u", "Hdiv") + u_evec = derham.create_spline_function("u_e", "Hdiv") + potentialvec = derham.create_spline_function("potential", "L2") pp_u = { "ManufacturedSolutionVelocity": { @@ -306,7 +286,7 @@ def test_propagator2D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): "ManufacturedSolutionPotential": { "given_in_basis": "physical", "dimension": "2D", - }, + } } uvec.initialize_coeffs(domain=domain, pert_params=pp_u) @@ -406,15 +386,6 @@ def test_propagator2D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): if __name__ == "__main__": - test_propagator1D( - [16, 1, 1], - [2, 2, 1], - [True, True, True], - [[False, False], [False, False], [False, False]], - ["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}], - 0.001, - 0.01, - ) # test_propagator2D( # [16, 16, 1], # [1, 1, 1], @@ -424,15 +395,15 @@ def test_propagator2D(Nel, p, spl_kind, dirichlet_bc, mapping, epsilon, dt): # 0.001, # 0.01, # 
) - # test_propagator2D( - # [16, 16, 1], - # [2, 2, 1], - # [True, True, True], - # [[False, False], [False, False], [False, False]], - # ["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}], - # 0.001, - # 0.01, - # ) + test_propagator2D( + [16, 16, 1], + [2, 2, 1], + [True, True, True], + [[False, False], [False, False], [False, False]], + ["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}], + 0.001, + 0.01, + ) # test_propagator2D( # [32, 32, 1], # [2, 2, 1], diff --git a/src/struphy/linear_algebra/tests/test_saddlepoint_massmatrices.py b/src/struphy/linear_algebra/tests/test_saddlepoint_massmatrices.py index 823584bc9..43d07a895 100644 --- a/src/struphy/linear_algebra/tests/test_saddlepoint_massmatrices.py +++ b/src/struphy/linear_algebra/tests/test_saddlepoint_massmatrices.py @@ -6,14 +6,13 @@ @pytest.mark.parametrize("Nel", [[12, 8, 1]]) @pytest.mark.parametrize("p", [[3, 3, 1]]) @pytest.mark.parametrize("spl_kind", [[False, True, True]]) -@pytest.mark.parametrize("dirichlet_bc", [((False, False), (False, False), (False, False))]) +@pytest.mark.parametrize("dirichlet_bc", [[[False, False], [False, False], [False, False]]]) @pytest.mark.parametrize("mapping", [["Cuboid", {"l1": 0.0, "r1": 2.0, "l2": 0.0, "r2": 3.0, "l3": 0.0, "r3": 6.0}]]) def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, mapping, show_plots=False): """Test saddle-point-solver with manufactured solutions.""" import time - import cunumpy as xp import scipy as sc from psydac.ddm.mpi import mpi as MPI from psydac.linalg.basic import IdentityOperator @@ -30,6 +29,7 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m from struphy.geometry import domains from struphy.initial import perturbations from struphy.linear_algebra.saddle_point import SaddlePointSolver + from struphy.utils.arrays import xp as np mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() @@ -107,7 +107,7 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m Cnp = derhamnumpy.curl.toarray() # Dnp = D.toarray() # Cnp = C.toarray() - if derham.with_local_projectors: + if derham.with_local_projectors == True: S21np = S21.toarray else: S21np = S21.toarray_struphy() @@ -121,7 +121,7 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m Cnp = derhamnumpy.curl.tosparse() # Dnp = D.tosparse() # Cnp = C.tosparse() - if derham.with_local_projectors: + if derham.with_local_projectors == True: S21np = S21.tosparse else: S21np = S21.toarray_struphy(is_sparse=True) @@ -132,12 +132,12 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m A11np = M2np / dt + nu * (Dnp.T @ M3np @ Dnp + S21np.T @ Cnp.T @ M2np @ Cnp @ S21np) - M2Bnp if method_to_solve in ("DirectNPInverse", "InexactNPInverse"): A22np = ( - stab_sigma * xp.identity(A11np.shape[0]) + stab_sigma * np.identity(A11np.shape[0]) + nue * (Dnp.T @ M3np @ Dnp + S21np.T @ Cnp.T @ M2np @ Cnp @ S21np) + M2Bnp ) # Preconditioner - _A22np_pre = stab_sigma * xp.identity(A22np.shape[0]) # + nue*(Dnp.T @ M3np @ Dnp) + _A22np_pre = stab_sigma * np.identity(A22np.shape[0]) # + nue*(Dnp.T @ M3np @ Dnp) _A11np_pre = M2np / dt # + nu * (Dnp.T @ M3np @ Dnp) elif method_to_solve in ("SparseSolver", "ScipySparse"): A22np = ( @@ -201,9 +201,9 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m - (B[0, 1].T).dot(y1_rdm) ) TestDiv = -B1.dot(x1) + B2.dot(x2) - RestDiv = 
xp.linalg.norm(TestDiv.toarray()) - RestA = xp.linalg.norm(TestA.toarray()) - RestAe = xp.linalg.norm(TestAe.toarray()) + RestDiv = np.linalg.norm(TestDiv.toarray()) + RestA = np.linalg.norm(TestA.toarray()) + RestAe = np.linalg.norm(TestAe.toarray()) print(f"{RestA =}") print(f"{RestAe =}") print(f"{RestDiv =}") @@ -218,10 +218,10 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m - (nue * (Dnp.T @ M3np @ Dnp + S21np.T @ Cnp.T @ M2np @ Cnp @ S21np) + M2Bnp).dot(x2np) - B2np.T.dot(ynp) ) - RestAnp = xp.linalg.norm(TestAnp) - RestAenp = xp.linalg.norm(TestAenp) + RestAnp = np.linalg.norm(TestAnp) + RestAenp = np.linalg.norm(TestAenp) TestDivnp = -B1np.dot(x1np) + B2np.dot(x2np) - RestDivnp = xp.linalg.norm(TestDivnp) + RestDivnp = np.linalg.norm(TestDivnp) print(f"{RestAnp =}") print(f"{RestAenp =}") print(f"{RestDivnp =}") @@ -242,7 +242,7 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m TestA11dot = TestA11.dot(x1) compare_arrays(TestA11dot, TestA11composeddot, mpi_rank, atol=1e-5) # compare_arrays(TestA11dot, TestA11npdot, mpi_rank, atol=1e-5) - print("Comparison numpy to psydac succesfull.") + print(f"Comparison numpy to psydac successful.") M2pre = MassMatrixPreconditioner(mass_mats.M2) @@ -270,7 +270,7 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m x_uzawa = {} x_uzawa[0] = x_u x_uzawa[1] = x_ue - if show_plots: + if show_plots == True: _plot_residual_norms(residual_norms) elif method_for_solving == "SaddlePointSolverGMRES": # Wrong initialization to check if changed @@ -296,22 +296,22 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m elapsed_time = end_time - start_time print(f"Method execution time: {elapsed_time:.6f} seconds") - if isinstance(x_uzawa[0], xp.ndarray): - # Output as xp.ndarray + if isinstance(x_uzawa[0], np.ndarray): + # Output as np.ndarray Rx1 = x1np - x_uzawa[0] Rx2 = x2np - x_uzawa[1] Ry = ynp - y_uzawa - residualx_normx1 = xp.linalg.norm(Rx1) - residualx_normx2 = xp.linalg.norm(Rx2) - residualy_norm = xp.linalg.norm(Ry) + residualx_normx1 = np.linalg.norm(Rx1) + residualx_normx2 = np.linalg.norm(Rx2) + residualy_norm = np.linalg.norm(Ry) TestRest1 = F1np - A11np.dot(x_uzawa[0]) - B1np.T.dot(y_uzawa) - TestRest1val = xp.max(abs(TestRest1)) + TestRest1val = np.max(abs(TestRest1)) Testoldy1 = F1np - A11np.dot(x_uzawa[0]) - B1np.T.dot(ynp) - Testoldy1val = xp.max(abs(Testoldy1)) + Testoldy1val = np.max(abs(Testoldy1)) TestRest2 = F2np - A22np.dot(x_uzawa[1]) - B2np.T.dot(y_uzawa) - TestRest2val = xp.max(abs(TestRest2)) + TestRest2val = np.max(abs(TestRest2)) Testoldy2 = F2np - A22np.dot(x_uzawa[1]) - B2np.T.dot(ynp) - Testoldy2val = xp.max(abs(Testoldy2)) + Testoldy2val = np.max(abs(Testoldy2)) print(f"{TestRest1val =}") print(f"{TestRest2val =}") print(f"{Testoldy1val =}") @@ -323,24 +323,24 @@ def test_saddlepointsolver(method_for_solving, Nel, p, spl_kind, dirichlet_bc, m compare_arrays(y1_rdm, y_uzawa, mpi_rank, atol=1e-5) compare_arrays(x1, x_uzawa[0], mpi_rank, atol=1e-5) compare_arrays(x2, x_uzawa[1], mpi_rank, atol=1e-5) - print(f"{info =}") + print(f"{info = }") elif isinstance(x_uzawa[0], BlockVector): # Output as Blockvector Rx1 = x1 - x_uzawa[0] Rx2 = x2 - x_uzawa[1] Ry = y1_rdm - y_uzawa - residualx_normx1 = xp.linalg.norm(Rx1.toarray()) - residualx_normx2 = 
np.linalg.norm(Rx2.toarray()) + residualy_norm = np.linalg.norm(Ry.toarray()) TestRest1 = F1 - A11.dot(x_uzawa[0]) - B1T.dot(y_uzawa) - TestRest1val = xp.max(abs(TestRest1.toarray())) + TestRest1val = np.max(abs(TestRest1.toarray())) Testoldy1 = F1 - A11.dot(x_uzawa[0]) - B1T.dot(y1_rdm) - Testoldy1val = xp.max(abs(Testoldy1.toarray())) + Testoldy1val = np.max(abs(Testoldy1.toarray())) TestRest2 = F2 - A22.dot(x_uzawa[1]) - B2T.dot(y_uzawa) - TestRest2val = xp.max(abs(TestRest2.toarray())) + TestRest2val = np.max(abs(TestRest2.toarray())) Testoldy2 = F2 - A22.dot(x_uzawa[1]) - B2T.dot(y1_rdm) - Testoldy2val = xp.max(abs(Testoldy2.toarray())) + Testoldy2val = np.max(abs(Testoldy2.toarray())) # print(f"{TestRest1val =}") # print(f"{TestRest2val =}") # print(f"{Testoldy1val =}") @@ -372,15 +372,16 @@ def _plot_residual_norms(residual_norms): def _plot_velocity(data_reshaped): - import cunumpy as xp import matplotlib import matplotlib.pyplot as plt + from struphy.utils.arrays import xp as np + matplotlib.use("Agg") - x = xp.linspace(0, 1, 30) - y = xp.linspace(0, 1, 30) - X, Y = xp.meshgrid(x, y) + x = np.linspace(0, 1, 30) + y = np.linspace(0, 1, 30) + X, Y = np.meshgrid(x, y) plt.figure(figsize=(6, 5)) plt.imshow(data_reshaped.T, cmap="viridis", origin="lower", extent=[0, 1, 0, 1]) diff --git a/src/struphy/linear_algebra/tests/test_stencil_dot_kernels.py b/src/struphy/linear_algebra/tests/test_stencil_dot_kernels.py index d2c2238ff..f91d6872d 100644 --- a/src/struphy/linear_algebra/tests/test_stencil_dot_kernels.py +++ b/src/struphy/linear_algebra/tests/test_stencil_dot_kernels.py @@ -13,13 +13,13 @@ def test_1d(Nel, p, spl_kind, domain_ind, codomain_ind): a) the result from kernel in struphy.linear_algebra.stencil_dot_kernels.matvec_1d_kernel b) the result from Stencil .dot with precompiled=True""" - import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.linalg.stencil import StencilMatrix, StencilVector from struphy.feec.psydac_derham import Derham from struphy.linear_algebra.stencil_dot_kernels import matvec_1d_kernel + from struphy.utils.arrays import xp as np # only for M1 Mac users PSYDAC_BACKEND_GPYCCEL["flags"] = "-O3 -march=native -mtune=native -ffast-math -ffree-line-length-none" @@ -78,8 +78,8 @@ def test_1d(Nel, p, spl_kind, domain_ind, codomain_ind): mat_pre._data[p_out + i_loc, d1] = m - i # random vector - # xp.random.seed(123) - x[s_in : e_in + 1] = xp.random.rand(domain.coeff_space.npts[0]) + # np.random.seed(123) + x[s_in : e_in + 1] = np.random.rand(domain.coeff_space.npts[0]) if rank == 0: print(f"spl_kind={spl_kind}") @@ -118,8 +118,8 @@ def test_1d(Nel, p, spl_kind, domain_ind, codomain_ind): print("\nout_ker=", out_ker._data) print("\nout_pre=", out_pre._data) - assert xp.allclose(out_ker._data, out._data) - assert xp.allclose(out_pre._data, out._data) + assert np.allclose(out_ker._data, out._data) + assert np.allclose(out_pre._data, out._data) @pytest.mark.parametrize("Nel", [[12, 16, 20]]) @@ -134,13 +134,13 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): a) the result from kernel in struphy.linear_algebra.stencil_dot_kernels.matvec_1d_kernel b) the result from Stencil .dot with precompiled=True""" - import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.linalg.stencil import StencilMatrix, StencilVector from struphy.feec.psydac_derham import Derham from struphy.linear_algebra.stencil_dot_kernels import 
matvec_3d_kernel + from struphy.utils.arrays import xp as np # only for M1 Mac users PSYDAC_BACKEND_GPYCCEL["flags"] = "-O3 -march=native -mtune=native -ffast-math -ffree-line-length-none" @@ -177,16 +177,16 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): x = StencilVector(domain.coeff_space) out_ker = StencilVector(codomain.coeff_space) - s_out = xp.array(mat.codomain.starts) - e_out = xp.array(mat.codomain.ends) - p_out = xp.array(mat.codomain.pads) - s_in = xp.array(mat.domain.starts) - e_in = xp.array(mat.domain.ends) - p_in = xp.array(mat.domain.pads) + s_out = np.array(mat.codomain.starts) + e_out = np.array(mat.codomain.ends) + p_out = np.array(mat.codomain.pads) + s_in = np.array(mat.domain.starts) + e_in = np.array(mat.domain.ends) + p_in = np.array(mat.domain.pads) # random matrix - xp.random.seed(123) - tmp1 = xp.random.rand(*codomain.coeff_space.npts, *[2 * q + 1 for q in p]) + np.random.seed(123) + tmp1 = np.random.rand(*codomain.coeff_space.npts, *[2 * q + 1 for q in p]) mat[ s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, @@ -207,7 +207,7 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): ] # random vector - tmp2 = xp.random.rand(*domain.coeff_space.npts) + tmp2 = np.random.rand(*domain.coeff_space.npts) x[ s_in[0] : e_in[0] + 1, s_in[1] : e_in[1] + 1, @@ -226,7 +226,7 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): # kernel matvec add = [int(end_in >= end_out) for end_in, end_out in zip(mat.domain.ends, mat.codomain.ends)] - add = xp.array(add) + add = np.array(add) matvec_3d_kernel(mat._data, x._data, out_ker._data, s_in, p_in, add, s_out, e_out, p_out) # precompiled .dot @@ -253,12 +253,12 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): print("\nout_ker[2]=", out_ker._data[p_out[0], p_out[1], :]) print("\nout_pre[2]=", out_pre._data[p_out[0], p_out[1], :]) - assert xp.allclose( + assert np.allclose( out_ker[s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, s_out[2] : e_out[2] + 1], out[s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, s_out[2] : e_out[2] + 1], ) - assert xp.allclose( + assert np.allclose( out_pre[s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, s_out[2] : e_out[2] + 1], out[s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, s_out[2] : e_out[2] + 1], ) diff --git a/src/struphy/linear_algebra/tests/test_stencil_transpose_kernels.py b/src/struphy/linear_algebra/tests/test_stencil_transpose_kernels.py index 1125a980c..0265ba741 100644 --- a/src/struphy/linear_algebra/tests/test_stencil_transpose_kernels.py +++ b/src/struphy/linear_algebra/tests/test_stencil_transpose_kernels.py @@ -13,13 +13,13 @@ def test_1d(Nel, p, spl_kind, domain_ind, codomain_ind): a) the result from kernel in struphy.linear_algebra.stencil_transpose_kernels.transpose_1d_kernel b) the result from Stencil .transpose with precompiled=True""" - import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.linalg.stencil import StencilMatrix from struphy.feec.psydac_derham import Derham from struphy.linear_algebra.stencil_transpose_kernels import transpose_1d_kernel + from struphy.utils.arrays import xp as np # only for M1 Mac users PSYDAC_BACKEND_GPYCCEL["flags"] = "-O3 -march=native -mtune=native -ffast-math -ffree-line-length-none" @@ -112,8 +112,8 @@ def test_1d(Nel, p, spl_kind, domain_ind, codomain_ind): print("\nmatT_pre=", matT_pre._data) print("\nmatT_pre.toarray=\n", matT_pre.toarray()) - assert xp.allclose(matT_ker[s_in : e_in + 1, :], matT[s_in : e_in + 1, :]) - assert 
xp.allclose(matT_pre[s_in : e_in + 1, :], matT[s_in : e_in + 1, :]) + assert np.allclose(matT_ker[s_in : e_in + 1, :], matT[s_in : e_in + 1, :]) + assert np.allclose(matT_pre[s_in : e_in + 1, :], matT[s_in : e_in + 1, :]) @pytest.mark.parametrize("Nel", [[12, 16, 20]]) @@ -128,13 +128,13 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): a) the result from kernel in struphy.linear_algebra.stencil_transpose_kernels.transpose_3d_kernel b) the result from Stencil .transpose with precompiled=True""" - import cunumpy as xp from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.ddm.mpi import mpi as MPI from psydac.linalg.stencil import StencilMatrix from struphy.feec.psydac_derham import Derham from struphy.linear_algebra.stencil_transpose_kernels import transpose_3d_kernel + from struphy.utils.arrays import xp as np # only for M1 Mac users PSYDAC_BACKEND_GPYCCEL["flags"] = "-O3 -march=native -mtune=native -ffast-math -ffree-line-length-none" @@ -170,16 +170,16 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): mat_pre = StencilMatrix(domain.coeff_space, codomain.coeff_space, backend=PSYDAC_BACKEND_GPYCCEL, precompiled=True) matT_ker = StencilMatrix(codomain.coeff_space, domain.coeff_space) - s_out = xp.array(mat.codomain.starts) - e_out = xp.array(mat.codomain.ends) - p_out = xp.array(mat.codomain.pads) - s_in = xp.array(mat.domain.starts) - e_in = xp.array(mat.domain.ends) - p_in = xp.array(mat.domain.pads) + s_out = np.array(mat.codomain.starts) + e_out = np.array(mat.codomain.ends) + p_out = np.array(mat.codomain.pads) + s_in = np.array(mat.domain.starts) + e_in = np.array(mat.domain.ends) + p_in = np.array(mat.domain.pads) # random matrix - xp.random.seed(123) - tmp1 = xp.random.rand(*codomain.coeff_space.npts, *[2 * q + 1 for q in p]) + np.random.seed(123) + tmp1 = np.random.rand(*codomain.coeff_space.npts, *[2 * q + 1 for q in p]) mat[ s_out[0] : e_out[0] + 1, s_out[1] : e_out[1] + 1, @@ -208,7 +208,7 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): # kernel transpose add = [int(end_out >= end_in) for end_in, end_out in zip(mat.domain.ends, mat.codomain.ends)] - add = xp.array(add) + add = np.array(add) transpose_3d_kernel(mat._data, matT_ker._data, s_out, p_out, add, s_in, e_in, p_in) # precompiled transpose @@ -237,12 +237,12 @@ def test_3d(Nel, p, spl_kind, domain_ind, codomain_ind): print("\nmatT_ker[2]=", matT_ker._data[p_in[0], p_in[1], :, 1, 1, :]) print("\nmatT_pre[2]=", matT_pre._data[p_in[0], p_in[1], :, 1, 1, :]) - assert xp.allclose( + assert np.allclose( matT_ker[s_in[0] : e_in[0] + 1, s_in[1] : e_in[1] + 1, s_in[2] : e_in[2] + 1], matT[s_in[0] : e_in[0] + 1, s_in[1] : e_in[1] + 1, s_in[2] : e_in[2] + 1], ) - assert xp.allclose( + assert np.allclose( matT_pre[s_in[0] : e_in[0] + 1, s_in[1] : e_in[1] + 1, s_in[2] : e_in[2] + 1], matT[s_in[0] : e_in[0] + 1, s_in[1] : e_in[1] + 1, s_in[2] : e_in[2] + 1], ) diff --git a/src/struphy/main.py b/src/struphy/main.py index 047abea95..4175d1024 100644 --- a/src/struphy/main.py +++ b/src/struphy/main.py @@ -1,74 +1,76 @@ -import copy -import datetime -import glob -import os -import pickle -import shutil -import sysconfig -import time -from typing import Optional, TypedDict - -import cunumpy as xp -import h5py -from line_profiler import profile -from psydac.ddm.mpi import MockMPI -from psydac.ddm.mpi import mpi as MPI -from pyevtk.hl import gridToVTK - -from struphy.fields_background.base import FluidEquilibrium, FluidEquilibriumWithB -from struphy.fields_background.equils import HomogenSlab -from 
struphy.geometry import domains -from struphy.geometry.base import Domain -from struphy.io.options import BaseUnits, DerhamOptions, EnvironmentOptions, Time, Units -from struphy.io.output_handling import DataContainer -from struphy.io.setup import import_parameters_py, setup_folders -from struphy.models.base import StruphyModel -from struphy.models.species import Species -from struphy.models.variables import FEECVariable -from struphy.pic.base import Particles -from struphy.post_processing.orbits import orbits_tools -from struphy.post_processing.post_processing_tools import ( - create_femfields, - create_vtk, - eval_femfields, - get_params_of_run, - post_process_f, - post_process_markers, - post_process_n_sph, -) -from struphy.profiling.profiling import ProfileManager -from struphy.topology import grids -from struphy.topology.grids import TensorProductGrid -from struphy.utils.clone_config import CloneConfig -from struphy.utils.utils import dict_to_yaml - - -@profile -def run( - model: StruphyModel, +from typing import Optional + + +def main( + model_name: Optional[str], + parameters: dict | str, + path_out: str, *, - params_path: str = None, - env: EnvironmentOptions = EnvironmentOptions(), - base_units: BaseUnits = BaseUnits(), - time_opts: Time = Time(), - domain: Domain = domains.Cuboid(), - equil: FluidEquilibrium = HomogenSlab(), - grid: TensorProductGrid = None, - derham_opts: DerhamOptions = None, + restart: bool = False, + runtime: int = 300, + save_step: int = 1, verbose: bool = False, + supress_out: bool = False, + sort_step: int = 0, + num_clones: int = 1, ): """ Run a Struphy model. Parameters ---------- - model : StruphyModel - The model to run. Check https://struphy.pages.mpcdf.de/struphy/sections/models.html for available models. + model_name : str + The name of the model to run. Type "struphy run --help" in your terminal to see a list of available models. + + parameters : dict | str + The simulation parameters. Can either be a dictionary OR a string (path of .yml parameter file). + + path_out : str + The output directory. Will create a folder if it does not exist OR cleans the folder for new runs. + + restart : bool, optional + Whether to restart a run (default=False). + + runtime : int, optional + Maximum run time of simulation in minutes. Will finish the time integration once this limit is reached (default=300). + + save_step : int, optional + When to save data output: every time step (save_step=1), every second time step (save_step=2), etc. (default=1). - params_path : str - Absolute path to .py parameter file. + verbose : bool + Show full screen output. + + supress_out : bool + Whether to suppress screen output during time integration.
+ + sort_step : int, optional + Sort markers in memory every N time steps (default=0, which means markers are sorted only at the start of simulation). + + num_clones : int, optional + Number of domain clones (default=1). """ + import copy + import os + import time + + from psydac.ddm.mpi import MockMPI + from psydac.ddm.mpi import mpi as MPI + from pyevtk.hl import gridToVTK + + from struphy.feec.psydac_derham import SplineFunction + from struphy.fields_background.base import FluidEquilibriumWithB + from struphy.io.output_handling import DataContainer + from struphy.io.setup import pre_processing + from struphy.models import fluid, hybrid, kinetic, toy + from struphy.models.base import StruphyModel + from struphy.profiling.profiling import ProfileManager + from struphy.utils.arrays import xp as np + from struphy.utils.clone_config import CloneConfig + + if sort_step: + from struphy.pic.base import Particles + if isinstance(MPI, MockMPI): comm = None rank = 0 @@ -88,87 +90,30 @@ def run( Barrier() start_simulation = time.time() - # check model - assert hasattr(model, "propagators"), "Attribute 'self.propagators' must be set in model __init__!" - model_name = model.__class__.__name__ - model.verbose = verbose - - if rank == 0: - print(f"\n*** Starting run for model '{model_name}':") - - # meta-data - path_out = env.path_out - restart = env.restart - max_runtime = env.max_runtime - save_step = env.save_step - sort_step = env.sort_step - num_clones = env.num_clones - use_mpi = (not comm is None,) - - meta = {} - meta["platform"] = sysconfig.get_platform() - meta["python version"] = sysconfig.get_python_version() - meta["model name"] = model_name - meta["parameter file"] = params_path - meta["output folder"] = path_out - meta["MPI processes"] = size - meta["use MPI.COMM_WORLD"] = use_mpi - meta["number of domain clones"] = num_clones - meta["restart"] = restart - meta["max wall-clock [min]"] = max_runtime - meta["save interval [steps]"] = save_step - - if rank == 0: - print("\nMETADATA:") - for k, v in meta.items(): - print(f"{k}:".ljust(25), v) - - # creating output folders - setup_folders( + # loading of simulation parameters, creating output folder and printing information to screen + params = pre_processing( + model_name=model_name, + parameters=parameters, path_out=path_out, restart=restart, + max_sim_time=runtime, + save_step=save_step, + mpi_rank=rank, + mpi_size=size, + use_mpi=not comm is None, + num_clones=num_clones, verbose=verbose, ) - # add derived units - units = Units(base_units) + if model_name is None: + assert "model" in params, "If model_name is not specified, then 'model: MODEL' must be specified in the params!"
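# Example (sketch): `parameters` may also be an in-memory dict instead of a .yml path.
# The keys follow the docstring above; the values here are placeholders, not a complete
# parameter set (a full one can be generated with the model's generate_default_parameter_file()):
#
#     params = {
#         "model": "Maxwell",  # consulted only when model_name is None
#         "time": {"dt": 0.05, "Tend": 1.0, "split_algo": "LieTrotter"},
#     }
#     main(None, params, "/abs/path/to/output")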
+ model_name = params["model"] + + if rank < 32: + print(f"Rank {rank}: calling struphy/main.py for model {model_name} ...") + if size > 32 and rank == 32: + print(f"Ranks > 31: calling struphy/main.py for model {model_name} ...") - # save parameter file - if rank == 0: - # save python param file - if params_path is not None: - assert params_path[-3:] == ".py" - shutil.copy2( - params_path, - os.path.join(path_out, "parameters.py"), - ) - # pickle struphy objects - else: - with open(os.path.join(path_out, "env.bin"), "wb") as f: - pickle.dump(env, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "base_units.bin"), "wb") as f: - pickle.dump(base_units, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "time_opts.bin"), "wb") as f: - pickle.dump(time_opts, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "domain.bin"), "wb") as f: - # WORKAROUND: cannot pickle pyccelized classes at the moment - tmp_dct = {"name": domain.__class__.__name__, "params": domain.params} - pickle.dump(tmp_dct, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "equil.bin"), "wb") as f: - # WORKAROUND: cannot pickle pyccelized classes at the moment - if equil is not None: - tmp_dct = {"name": equil.__class__.__name__, "params": equil.params} - else: - tmp_dct = {} - pickle.dump(tmp_dct, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "grid.bin"), "wb") as f: - pickle.dump(grid, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "derham_opts.bin"), "wb") as f: - pickle.dump(derham_opts, f, pickle.HIGHEST_PROTOCOL) - with open(os.path.join(path_out, "model_class.bin"), "wb") as f: - pickle.dump(model.__class__, f, pickle.HIGHEST_PROTOCOL) - - # config clones if comm is None: clone_config = None else: @@ -179,65 +124,32 @@ def run( # MPI.COMM_WORLD : comm # within a clone: : sub_comm # between the clones : inter_comm - clone_config = CloneConfig(comm=comm, params=None, num_clones=num_clones) + clone_config = CloneConfig(comm=comm, params=params, num_clones=num_clones) clone_config.print_clone_config() - if model.particle_species: + if "kinetic" in params: clone_config.print_particle_config() - model.clone_config = clone_config - Barrier() - - ## configure model instance + # instantiate Struphy model (will allocate model objects and associated memory) + StruphyModel.verbose = verbose - # units - model.units = units - if model.bulk_species is None: - A_bulk = None - Z_bulk = None - else: - A_bulk = model.bulk_species.mass_number - Z_bulk = model.bulk_species.charge_number - model.units.derive_units( - velocity_scale=model.velocity_scale, - A_bulk=A_bulk, - Z_bulk=Z_bulk, - verbose=verbose, - ) - - # domain and fluid background - model.setup_domain_and_equil(domain, equil) - - # feec - model.allocate_feec(grid, derham_opts) - - # equation paramters - model.setup_equation_params(units=model.units, verbose=verbose) - - # allocate variables - model.allocate_variables(verbose=verbose) - model.allocate_helpers() - - # pass info to propagators - model.allocate_propagators() + # look up the model class in the model modules; fail loudly if the name is unknown + objs = [fluid, kinetic, hybrid, toy] + model_class = None + for obj in objs: + try: + model_class = getattr(obj, model_name) + break + except AttributeError: + continue + assert model_class is not None, f"Model '{model_name}' not found in struphy.models." - # plasma parameters - model.compute_plasma_params(verbose=verbose) + with ProfileManager.profile_region("model_class_setup"): + model = model_class(params=params, comm=comm, clone_config=clone_config) - if rank < 32: - if rank == 0: - print("") - Barrier() - print(f"Rank {rank}: executing main.run() for model {model_name} ...") - - if size > 
32 and rank == 32: - print(f"Ranks > 31: executing main.run() for model {model_name} ...") + assert isinstance(model, StruphyModel) # store geometry vtk if rank == 0: grids_log = [ - xp.linspace(1e-6, 1.0, 32), - xp.linspace(0.0, 1.0, 32), - xp.linspace(0.0, 1.0, 32), + np.linspace(1e-6, 1.0, 32), + np.linspace(0.0, 1.0, 32), + np.linspace(0.0, 1.0, 32), ] tmp = model.domain(*grids_log) @@ -262,9 +174,9 @@ def run( # time quantities (current time value, value in seconds and index) time_state = {} - time_state["value"] = xp.zeros(1, dtype=float) - time_state["value_sec"] = xp.zeros(1, dtype=float) - time_state["index"] = xp.zeros(1, dtype=int) + time_state["value"] = np.zeros(1, dtype=float) + time_state["value_sec"] = np.zeros(1, dtype=float) + time_state["index"] = np.zeros(1, dtype=int) # add time quantities to data object for saving for key, val in time_state.items(): @@ -273,22 +185,22 @@ def run( data.add_data({key_time: val}) data.add_data({key_time_restart: val}) - # retrieve time parameters - dt = time_opts.dt - Tend = time_opts.Tend - split_algo = time_opts.split_algo + time_params = params["time"] # set initial conditions for all variables - if restart: + if not restart: + model.initialize_from_params() + + total_steps = str(int(round(time_params["Tend"] / time_params["dt"]))) + + else: model.initialize_from_restart(data) time_state["value"][0] = data.file["restart/time/value"][-1] time_state["value_sec"][0] = data.file["restart/time/value_sec"][-1] time_state["index"][0] = data.file["restart/time/index"][-1] - total_steps = str(int(round((Tend - time_state["value"][0]) / dt))) - else: - total_steps = str(int(round(Tend / dt))) + total_steps = str(int(round((time_params["Tend"] - time_state["value"][0]) / time_params["dt"]))) # compute initial scalars and kinetic data, pass time state to all propagators model.update_scalar_quantities() @@ -305,6 +217,7 @@ def run( print("\nINITIAL SCALAR QUANTITIES:") model.print_scalar_quantities() + split_algo = time_params["split_algo"] print(f"\nSTART TIME STEPPING WITH '{split_algo}' SPLITTING:") # time loop @@ -313,8 +226,8 @@ def run( Barrier() # stop time loop? - break_cond_1 = time_state["value"][0] >= Tend - break_cond_2 = run_time_now > max_runtime + break_cond_1 = time_state["value"][0] >= time_params["Tend"] + break_cond_2 = run_time_now > runtime if break_cond_1 or break_cond_2: # save restart data (other data already saved below) @@ -322,7 +235,6 @@ def run( data.file.close() end_simulation = time.time() if rank == 0: - print(f"\nTime steps done: {time_state['index'][0]}") print( "wall-clock time of simulation [sec]: ", end_simulation - start_simulation, @@ -336,25 +248,24 @@ def run( if isinstance(val, Particles): val.do_sort() t1 = time.time() - if rank == 0 and verbose: + if rank == 0 and not supress_out: message = "Particles sorted | wall clock [s]: {0:8.4f} | sorting duration [s]: {1:8.4f}".format( - run_time_now * 60, - t1 - t0, + run_time_now * 60, t1 - t0 ) print(message, end="\n") print() - # update time and index (round time to 10 decimals for a clean time grid!) 
- time_state["value"][0] = round(time_state["value"][0] + dt, 10) - time_state["value_sec"][0] = round(time_state["value_sec"][0] + dt * model.units.t, 10) - time_state["index"][0] += 1 - # perform one time step dt t0 = time.time() with ProfileManager.profile_region("model.integrate"): - model.integrate(dt, split_algo) + model.integrate(time_params["dt"], time_params["split_algo"]) t1 = time.time() + # update time and index (round time to 10 decimals for a clean time grid!) + time_state["value"][0] = round(time_state["value"][0] + time_params["dt"], 10) + time_state["value_sec"][0] = round(time_state["value_sec"][0] + time_params["dt"] * model.units["t"], 10) + time_state["index"][0] += 1 + run_time_now = (time.time() - start_simulation) / 60 # update diagnostics data and save data @@ -364,32 +275,43 @@ def run( model.update_markers_to_be_saved() model.update_distr_functions() - # extract FEEC coefficients - feec_species = model.field_species | model.fluid_species | model.diagnostic_species - for species, val in feec_species.items(): - assert isinstance(val, Species) - for variable, subval in val.variables.items(): - assert isinstance(subval, FEECVariable) - spline = subval.spline + # extract FEM coefficients + for key, val in model.em_fields.items(): + if "params" not in key: + field = val["obj"] + assert isinstance(field, SplineFunction) + # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! + field.extract_coeffs(update_ghost_regions=False) + + for _, val in model.fluid.items(): + for variable, subval in val.items(): + if "params" not in variable: + field = subval["obj"] + assert isinstance(field, SplineFunction) + # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! + field.extract_coeffs(update_ghost_regions=False) + + for key, val in model.diagnostics.items(): + if "params" not in key: + field = val["obj"] + assert isinstance(field, SplineFunction) # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! - spline.extract_coeffs(update_ghost_regions=False) + field.extract_coeffs(update_ghost_regions=False) # save data (everything but restart data) data.save_data(keys=save_keys_all) # print current time and scalar quantities to screen - if rank == 0 and verbose: + if rank == 0 and not supress_out: step = str(time_state["index"][0]).zfill(len(total_steps)) message = "time step: " + step + "/" + str(total_steps) - message += " | " + "time: {0:10.5f}/{1:10.5f}".format(time_state["value"][0], Tend) + message += " | " + "time: {0:10.5f}/{1:10.5f}".format(time_state["value"][0], time_params["Tend"]) message += " | " + "phys. time [s]: {0:12.10f}/{1:12.10f}".format( - time_state["value_sec"][0], - Tend * model.units.t, + time_state["value_sec"][0], time_params["Tend"] * model.units["t"] ) message += " | " + "wall clock [s]: {0:8.4f} | last step duration [s]: {1:8.4f}".format( - run_time_now * 60, - t1 - t0, + run_time_now * 60, t1 - t0 ) print(message, end="\n") @@ -398,446 +320,17 @@ def run( # =================================================================== - meta["wall-clock time[min]"] = (end_simulation - start_simulation) / 60 + with open(path_out + "/meta.txt", "a") as f: + # f.write('wall-clock time [min]:'.ljust(30) + str((end_simulation - start_simulation)/60.) 
+ '\n') + f.write(f"{rank} {'wall-clock time[min]: '.ljust(30)}{(end_simulation - start_simulation) / 60}\n") Barrier() - if rank == 0: - # save meta-data - dict_to_yaml(meta, os.path.join(path_out, "meta.yml")) print("Struphy run finished.") if clone_config is not None: clone_config.free() -def pproc( - path: str, - *, - step: int = 1, - celldivide: int = 1, - physical: bool = False, - guiding_center: bool = False, - classify: bool = False, - no_vtk: bool = False, - time_trace: bool = False, -): - """Post-processing finished Struphy runs. - - Parameters - ---------- - path : str - Absolute path of simulation output folder to post-process. - - step : int - Whether to do post-processing at every time step (step=1, default), every second time step (step=2), etc. - - celldivide : int - Grid refinement in evaluation of FEM fields. E.g. celldivide=2 evaluates two points per grid cell. - - physical : bool - Wether to do post-processing into push-forwarded physical (xyz) components of fields. - - guiding_center : bool - Compute guiding-center coordinates (only from Particles6D). - - classify : bool - Classify guiding-center trajectories (passing, trapped or lost). - - no_vtk : bool - whether vtk files creation should be skipped - - time_trace : bool - whether to plot the time trace of each measured region - """ - - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\n*** Start post-processing of {path}:") - - # import parameters - params_in = get_params_of_run(path) - model = params_in.model - domain = params_in.domain - - # create post-processing folder - path_pproc = os.path.join(path, "post_processing") - - try: - os.mkdir(path_pproc) - except: - shutil.rmtree(path_pproc) - os.mkdir(path_pproc) - - if time_trace: - from struphy.post_processing.likwid.plot_time_traces import plot_gantt_chart, plot_time_vs_duration - - path_time_trace = os.path.join(path, "profiling_time_trace.pkl") - plot_time_vs_duration(path_time_trace, output_path=path_pproc) - plot_gantt_chart(path_time_trace, output_path=path_pproc) - return - - # check for fields and kinetic data in hdf5 file that need post processing - file = h5py.File(os.path.join(path, "data/", "data_proc0.hdf5"), "r") - - # save time grid at which post-processing data is created - xp.save(os.path.join(path_pproc, "t_grid.npy"), file["time/value"][::step].copy()) - - if "feec" in file.keys(): - exist_fields = True - else: - exist_fields = False - - if "kinetic" in file.keys(): - exist_kinetic = {"markers": False, "f": False, "n_sph": False} - kinetic_species = [] - kinetic_kinds = [] - for name in file["kinetic"].keys(): - kinetic_species += [name] - kinetic_kinds += [next(iter(model.species[name].variables.values())).space] - - # check for saved markers - if "markers" in file["kinetic"][name]: - exist_kinetic["markers"] = True - # check for saved distribution function - if "f" in file["kinetic"][name]: - exist_kinetic["f"] = True - # check for saved sph density - if "n_sph" in file["kinetic"][name]: - exist_kinetic["n_sph"] = True - else: - exist_kinetic = None - - file.close() - - # field post-processing - if exist_fields: - fields, t_grid = create_femfields(path, params_in=params_in, step=step) - - point_data, grids_log, grids_phy = eval_femfields(params_in, fields, celldivide=[celldivide] * 3) - - if physical: - point_data_phy, grids_log, grids_phy = eval_femfields( - params_in, - fields, - celldivide=[celldivide] * 3, - physical=True, - ) - - # directory for field data - path_fields = os.path.join(path_pproc, "fields_data") - - try: - 
os.mkdir(path_fields) - except: - shutil.rmtree(path_fields) - os.mkdir(path_fields) - - # save data dicts for each field - for species, vars in point_data.items(): - for name, val in vars.items(): - try: - os.mkdir(os.path.join(path_fields, species)) - except: - pass - - with open(os.path.join(path_fields, species, name + "_log.bin"), "wb") as handle: - pickle.dump(val, handle, protocol=pickle.HIGHEST_PROTOCOL) - - if physical: - with open(os.path.join(path_fields, species, name + "_phy.bin"), "wb") as handle: - pickle.dump(point_data_phy[species][name], handle, protocol=pickle.HIGHEST_PROTOCOL) - - # save grids - with open(os.path.join(path_fields, "grids_log.bin"), "wb") as handle: - pickle.dump(grids_log, handle, protocol=pickle.HIGHEST_PROTOCOL) - - with open(os.path.join(path_fields, "grids_phy.bin"), "wb") as handle: - pickle.dump(grids_phy, handle, protocol=pickle.HIGHEST_PROTOCOL) - - # create vtk files - if not no_vtk: - create_vtk(path_fields, t_grid, grids_phy, point_data) - if physical: - create_vtk(path_fields, t_grid, grids_phy, point_data_phy, physical=True) - - # kinetic post-processing - if exist_kinetic is not None: - # directory for kinetic data - path_kinetics = os.path.join(path_pproc, "kinetic_data") - - try: - os.mkdir(path_kinetics) - except: - shutil.rmtree(path_kinetics) - os.mkdir(path_kinetics) - - # kinetic post-processing for each species - for n, species in enumerate(kinetic_species): - # directory for each species - path_kinetics_species = os.path.join(path_kinetics, species) - - try: - os.mkdir(path_kinetics_species) - except: - shutil.rmtree(path_kinetics_species) - os.mkdir(path_kinetics_species) - - # markers - if exist_kinetic["markers"]: - post_process_markers( - path, - path_kinetics_species, - species, - domain, - kinetic_kinds[n], - step, - ) - - if guiding_center: - assert kinetic_kinds[n] == "Particles6D" - orbits_tools.post_process_orbit_guiding_center(path, path_kinetics_species, species) - - if classify: - orbits_tools.post_process_orbit_classification(path_kinetics_species, species) - - # distribution function - if exist_kinetic["f"]: - if kinetic_kinds[n] == "DeltaFParticles6D": - compute_bckgr = True - else: - compute_bckgr = False - - post_process_f( - path, - params_in, - path_kinetics_species, - species, - step, - compute_bckgr=compute_bckgr, - ) - - # sph density - if exist_kinetic["n_sph"]: - post_process_n_sph( - path, - params_in, - path_kinetics_species, - species, - step, - ) - - -class SimData: - """Holds post-processed Struphy data as attributes. - - Parameters - ---------- - path : str - Absolute path of simulation output folder to post-process. - """ - - def __init__(self, path: str): - self.path = path - self._orbits = {} - self._f = {} - self._spline_values = {} - self._n_sph = {} - self.grids_log: list[xp.ndarray] = None - self.grids_phy: list[xp.ndarray] = None - self.t_grid: xp.ndarray = None - - @property - def orbits(self) -> dict[str, xp.ndarray]: - """Keys: species name. Values: 3d arrays indexed by (n, p, a), where 'n' is the time index, 'p' the particle index and 'a' the attribute index.""" - return self._orbits - - @property - def f(self) -> dict[str, dict[str, dict[str, xp.ndarray]]]: - """Keys: species name. Values: dicts of slice names ('e1_v1' etc.) holding dicts of corresponding xp.arrays for plotting.""" - return self._f - - @property - def spline_values(self) -> dict[str, dict[str, xp.ndarray]]: - """Keys: species name. 
Values: dicts of variable names with values being 3d arrays on the grid.""" - return self._spline_values - - @property - def n_sph(self) -> dict[str, dict[str, dict[str, xp.ndarray]]]: - """Keys: species name. Values: dicts of view names ('view_0' etc.) holding dicts of corresponding xp.arrays for plotting.""" - return self._n_sph - - @property - def Nt(self) -> dict[str, int]: - """Number of available time points (snap shots) for each species.""" - if not hasattr(self, "_Nt"): - self._Nt = {} - for spec, orbs in self.orbits.items(): - self._Nt[spec] = orbs.shape[0] - return self._Nt - - @property - def Np(self) -> dict[str, int]: - """Number of particle orbits for each species.""" - if not hasattr(self, "_Np"): - self._Np = {} - for spec, orbs in self.orbits.items(): - self._Np[spec] = orbs.shape[1] - return self._Np - - @property - def Nattr(self) -> dict[str, int]: - """Number of particle attributes for each species.""" - if not hasattr(self, "_Nattr"): - self._Nattr = {} - for spec, orbs in self.orbits.items(): - self._Nattr[spec] = orbs.shape[2] - return self._Nattr - - -def load_data(path: str) -> SimData: - """Load data generated during post-processing. - - Parameters - ---------- - path : str - Absolute path of simulation output folder to post-process. - """ - - path_pproc = os.path.join(path, "post_processing") - assert os.path.exists(path_pproc), f"Path {path_pproc} does not exist, run 'pproc' first?" - print("\n*** Loading post-processed simulation data:") - print(f"{path =}") - - simdata = SimData(path) - - # load time grid - simdata.t_grid = xp.load(os.path.join(path_pproc, "t_grid.npy")) - - # data paths - path_fields = os.path.join(path_pproc, "fields_data") - path_kinetic = os.path.join(path_pproc, "kinetic_data") - - # load point data - if os.path.exists(path_fields): - # grids - with open(os.path.join(path_fields, "grids_log.bin"), "rb") as f: - simdata.grids_log = pickle.load(f) - with open(os.path.join(path_fields, "grids_phy.bin"), "rb") as f: - simdata.grids_phy = pickle.load(f) - - # species folders - species = next(os.walk(path_fields))[1] - for spec in species: - simdata._spline_values[spec] = {} - # simdata.arrays[spec] = {} - path_spec = os.path.join(path_fields, spec) - wlk = os.walk(path_spec) - files = next(wlk)[2] - print(f"\nFiles in {path_spec}: {files}") - for file in files: - if ".bin" in file: - var = file.split(".")[0] - with open(os.path.join(path_spec, file), "rb") as f: - # try: - simdata._spline_values[spec][var] = pickle.load(f) - # simdata.arrays[spec][var] = pickle.load(f) - - if os.path.exists(path_kinetic): - # species folders - species = next(os.walk(path_kinetic))[1] - print(f"{species =}") - for spec in species: - path_spec = os.path.join(path_kinetic, spec) - wlk = os.walk(path_spec) - sub_folders = next(wlk)[1] - for folder in sub_folders: - path_dat = os.path.join(path_spec, folder) - sub_wlk = os.walk(path_dat) - - if "orbits" in folder: - files = next(sub_wlk)[2] - Nt = len(files) // 2 - n = 0 - for file in files: - # print(f"{file = }") - if ".npy" in file: - step = int(file.split(".")[0].split("_")[-1]) - tmp = xp.load(os.path.join(path_dat, file)) - if n == 0: - simdata._orbits[spec] = xp.zeros((Nt, *tmp.shape), dtype=float) - simdata._orbits[spec][step] = tmp - n += 1 - - elif "distribution_function" in folder: - simdata._f[spec] = {} - slices = next(sub_wlk)[1] - # print(f"{slices = }") - for sli in slices: - simdata._f[spec][sli] = {} - # print(f"{sli = }") - files = next(sub_wlk)[2] - # print(f"{files = }") - for file in files: - 
name = file.split(".")[0] - tmp = xp.load(os.path.join(path_dat, sli, file)) - # print(f"{name = }") - simdata._f[spec][sli][name] = tmp - - elif "n_sph" in folder: - simdata._n_sph[spec] = {} - slices = next(sub_wlk)[1] - # print(f"{slices = }") - for sli in slices: - simdata._n_sph[spec][sli] = {} - # print(f"{sli = }") - files = next(sub_wlk)[2] - # print(f"{files = }") - for file in files: - name = file.split(".")[0] - tmp = xp.load(os.path.join(path_dat, sli, file)) - # print(f"{name = }") - simdata._n_sph[spec][sli][name] = tmp - - else: - print(f"{folder =}") - raise NotImplementedError - - print("\nThe following data has been loaded:") - print("\ngrids:") - print(f"{simdata.t_grid.shape =}") - if simdata.grids_log is not None: - print(f"{simdata.grids_log[0].shape =}") - print(f"{simdata.grids_log[1].shape =}") - print(f"{simdata.grids_log[2].shape =}") - if simdata.grids_phy is not None: - print(f"{simdata.grids_phy[0].shape =}") - print(f"{simdata.grids_phy[1].shape =}") - print(f"{simdata.grids_phy[2].shape =}") - print("\nsimdata.spline_values:") - for k, v in simdata.spline_values.items(): - print(f" {k}") - for kk, vv in v.items(): - print(f" {kk}") - print("\nsimdata.orbits:") - for k, v in simdata.orbits.items(): - print(f" {k}") - print("\nsimdata.f:") - for k, v in simdata.f.items(): - print(f" {k}") - for kk, vv in v.items(): - print(f" {kk}") - for kkk, vvv in vv.items(): - print(f" {kkk}") - print("\nsimdata.n_sph:") - for k, v in simdata.n_sph.items(): - print(f" {k}") - for kk, vv in v.items(): - print(f" {kk}") - for kkk, vvv in vv.items(): - print(f" {kkk}") - - return simdata - - if __name__ == "__main__": import argparse import os @@ -853,6 +346,7 @@ def load_data(path: str) -> SimData: # Read struphy state file state = utils.read_state() + o_path = state["o_path"] parser = argparse.ArgumentParser(description="Run an Struphy model.") @@ -873,7 +367,7 @@ def load_data(path: str) -> SimData: "--input", type=str, metavar="FILE", - help="absolute path of parameter file", + help="absolute path of parameter file (.yml)", ) # output (absolute path) @@ -894,9 +388,9 @@ def load_data(path: str) -> SimData: action="store_true", ) - # max_runtime + # runtime parser.add_argument( - "--max-runtime", + "--runtime", type=int, metavar="N", help="maximum wall-clock time of program in minutes (default=300)", @@ -938,6 +432,13 @@ def load_data(path: str) -> SimData: action="store_true", ) + # suppress screen output + parser.add_argument( + "--supress-out", + help="suppress screen output during time integration", + action="store_true", + ) + parser.add_argument( "--likwid", help="run with Likwid", @@ -971,8 +472,8 @@ def load_data(path: str) -> SimData: config.simulation_label = "" pylikwid_markerinit() with ProfileManager.profile_region("main"): - # solve the model - run( + # Call main + main( args.model, args.input, args.output, @@ -980,6 +481,7 @@ def load_data(path: str) -> SimData: runtime=args.runtime, save_step=args.save_step, verbose=args.verbose, + supress_out=args.supress_out, sort_step=args.sort_step, num_clones=args.nclones, ) diff --git a/src/struphy/models/__init__.py b/src/struphy/models/__init__.py index 0153467f6..22d1dcab8 100644 --- a/src/struphy/models/__init__.py +++ b/src/struphy/models/__init__.py @@ -1,74 +1,76 @@ -# from struphy.models.fluid import ( -# ColdPlasma, -# EulerSPH, -# HasegawaWakatani, -# LinearExtendedMHDuniform, -# LinearMHD, -# ViscoresistiveDeltafMHD, -# ViscoresistiveDeltafMHD_with_q, -# ViscoresistiveLinearMHD, -# 
ViscoresistiveLinearMHD_with_q, -# ViscoresistiveMHD, -# ViscoresistiveMHD_with_p, -# ViscoresistiveMHD_with_q, -# ViscousFluid, -# ) -# from struphy.models.hybrid import ColdPlasmaVlasov, LinearMHDDriftkineticCC, LinearMHDVlasovCC, LinearMHDVlasovPC -# from struphy.models.kinetic import ( -# DriftKineticElectrostaticAdiabatic, -# LinearVlasovAmpereOneSpecies, -# LinearVlasovMaxwellOneSpecies, -# VlasovAmpereOneSpecies, -# VlasovMaxwellOneSpecies, -# ) -# from struphy.models.toy import ( -# DeterministicParticleDiffusion, -# GuidingCenter, -# Maxwell, -# Poisson, -# PressureLessSPH, -# RandomParticleDiffusion, -# ShearAlfven, -# TwoFluidQuasiNeutralToy, -# VariationalBarotropicFluid, -# VariationalCompressibleFluid, -# VariationalPressurelessFluid, -# Vlasov, -# ) +from struphy.models.fluid import ( + ColdPlasma, + HasegawaWakatani, + IsothermalEulerSPH, + LinearExtendedMHDuniform, + LinearMHD, + ViscoresistiveDeltafMHD, + ViscoresistiveDeltafMHD_with_q, + ViscoresistiveLinearMHD, + ViscoresistiveLinearMHD_with_q, + ViscoresistiveMHD, + ViscoresistiveMHD_with_p, + ViscoresistiveMHD_with_q, + ViscousEulerSPH, + ViscousFluid, +) +from struphy.models.hybrid import ColdPlasmaVlasov, LinearMHDDriftkineticCC, LinearMHDVlasovCC, LinearMHDVlasovPC +from struphy.models.kinetic import ( + DriftKineticElectrostaticAdiabatic, + LinearVlasovAmpereOneSpecies, + LinearVlasovMaxwellOneSpecies, + VlasovAmpereOneSpecies, + VlasovMaxwellOneSpecies, +) +from struphy.models.toy import ( + DeterministicParticleDiffusion, + GuidingCenter, + Maxwell, + Poisson, + PressureLessSPH, + RandomParticleDiffusion, + ShearAlfven, + TwoFluidQuasiNeutralToy, + VariationalBarotropicFluid, + VariationalCompressibleFluid, + VariationalPressurelessFluid, + Vlasov, +) -# __all__ = [ -# "Maxwell", -# "Vlasov", -# "GuidingCenter", -# "ShearAlfven", -# "VariationalPressurelessFluid", -# "VariationalBarotropicFluid", -# "VariationalCompressibleFluid", -# "Poisson", -# "DeterministicParticleDiffusion", -# "RandomParticleDiffusion", -# "PressureLessSPH", -# "TwoFluidQuasiNeutralToy", -# "LinearMHD", -# "LinearExtendedMHDuniform", -# "ColdPlasma", -# "ViscoresistiveMHD", -# "ViscousFluid", -# "ViscoresistiveMHD_with_p", -# "ViscoresistiveLinearMHD", -# "ViscoresistiveDeltafMHD", -# "ViscoresistiveMHD_with_q", -# "ViscoresistiveLinearMHD_with_q", -# "ViscoresistiveDeltafMHD_with_q", -# "EulerSPH", -# "HasegawaWakatani", -# "LinearMHDVlasovCC", -# "LinearMHDVlasovPC", -# "LinearMHDDriftkineticCC", -# "ColdPlasmaVlasov", -# "VlasovAmpereOneSpecies", -# "VlasovMaxwellOneSpecies", -# "LinearVlasovAmpereOneSpecies", -# "LinearVlasovMaxwellOneSpecies", -# "DriftKineticElectrostaticAdiabatic", -# ] +__all__ = [ + "Maxwell", + "Vlasov", + "GuidingCenter", + "ShearAlfven", + "VariationalPressurelessFluid", + "VariationalBarotropicFluid", + "VariationalCompressibleFluid", + "Poisson", + "DeterministicParticleDiffusion", + "RandomParticleDiffusion", + "PressureLessSPH", + "TwoFluidQuasiNeutralToy", + "LinearMHD", + "LinearExtendedMHDuniform", + "ColdPlasma", + "ViscoresistiveMHD", + "ViscousFluid", + "ViscoresistiveMHD_with_p", + "ViscoresistiveLinearMHD", + "ViscoresistiveDeltafMHD", + "ViscoresistiveMHD_with_q", + "ViscoresistiveLinearMHD_with_q", + "ViscoresistiveDeltafMHD_with_q", + "IsothermalEulerSPH", + "ViscousEulerSPH", + "HasegawaWakatani", + "LinearMHDVlasovCC", + "LinearMHDVlasovPC", + "LinearMHDDriftkineticCC", + "ColdPlasmaVlasov", + "VlasovAmpereOneSpecies", + "VlasovMaxwellOneSpecies", + "LinearVlasovAmpereOneSpecies", + 
"LinearVlasovMaxwellOneSpecies", + "DriftKineticElectrostaticAdiabatic", +] diff --git a/src/struphy/models/base.py b/src/struphy/models/base.py index b484397a0..cf7467585 100644 --- a/src/struphy/models/base.py +++ b/src/struphy/models/base.py @@ -1,213 +1,155 @@ import inspect import operator -import os from abc import ABCMeta, abstractmethod from functools import reduce -from textwrap import indent -import cunumpy as xp import yaml -from line_profiler import profile -from psydac.ddm.mpi import MockMPI from psydac.ddm.mpi import mpi as MPI from psydac.linalg.stencil import StencilVector -import struphy from struphy.feec.basis_projection_ops import BasisProjectionOperators from struphy.feec.mass import WeightedMassOperators from struphy.feec.psydac_derham import SplineFunction from struphy.fields_background.base import FluidEquilibrium, FluidEquilibriumWithB, MHDequilibrium -from struphy.fields_background.equils import HomogenSlab from struphy.fields_background.projected_equils import ( ProjectedFluidEquilibrium, ProjectedFluidEquilibriumWithB, ProjectedMHDequilibrium, ) -from struphy.geometry.base import Domain -from struphy.geometry.domains import Cuboid -from struphy.io.options import BaseUnits, DerhamOptions, Time, Units -from struphy.io.output_handling import DataContainer -from struphy.io.setup import descend_options_dict, setup_derham -from struphy.kinetic_background import maxwellians -from struphy.models.species import DiagnosticSpecies, FieldSpecies, FluidSpecies, ParticleSpecies, Species -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable -from struphy.pic import particles -from struphy.pic.base import Particles +from struphy.io.setup import setup_derham, setup_domain_and_equil from struphy.profiling.profiling import ProfileManager from struphy.propagators.base import Propagator -from struphy.topology.grids import TensorProductGrid +from struphy.utils.arrays import xp as np from struphy.utils.clone_config import CloneConfig -from struphy.utils.utils import dict_to_yaml, read_state +from struphy.utils.utils import dict_to_yaml class StruphyModel(metaclass=ABCMeta): """ Base class for all Struphy models. + Parameters + ---------- + params : dict + Simulation parameters, see from :ref:`params_yml`. + + comm : mpi4py.MPI.Intracomm + MPI communicator for parallel runs. + + clone_config: struphy.utils.CloneConfig + Contains the # TODO + Note ---- All Struphy models are subclasses of ``StruphyModel`` and should be added to ``struphy/models/`` in one of the modules ``fluid.py``, ``kinetic.py``, ``hybrid.py`` or ``toy.py``. """ - ## abstract methods - - @abstractmethod - class Propagators: - pass - - @abstractmethod - def __init__(self): - """Light-weight init of model.""" - - @property - @abstractmethod - def bulk_species() -> Species: - """Bulk species of the plasma. Must be an attribute of species_static().""" + def __init__( + self, + params: dict, + comm: MPI.Intracomm = None, + clone_config: CloneConfig = None, + ): + assert "em_fields" in self.species() + assert "fluid" in self.species() + assert "kinetic" in self.species() + + assert "em_fields" in self.options() + assert "fluid" in self.options() + assert "kinetic" in self.options() + + if params is None: + params = self.generate_default_parameter_file( + save=False, + prompt=False, + ) - @property - @abstractmethod - def velocity_scale() -> str: - """Velocity unit scale of the model. 
- Must be one of "alfvén", "cyclotron", "light" or "thermal".""" + self._comm_world = comm + self._clone_config = clone_config - @abstractmethod - def allocate_helpers(self): - """Allocate helper arrays that are needed during simulation.""" + self._params = params - @abstractmethod - def update_scalar_quantities(self): - """Specify an update rule for each item in ``scalar_quantities`` using :meth:`update_scalar`.""" + # get rank and size + if self.comm_world is None: + self._rank_world = 0 + else: + self._rank_world = self.comm_world.Get_rank() - ## setup methods + # initialize model variable dictionaries + self._init_variable_dicts() - def setup_equation_params(self, units: Units, verbose=False): - """Set euqation parameters for each fluid and kinetic species.""" - for _, species in self.fluid_species.items(): - assert isinstance(species, FluidSpecies) - species.setup_equation_params(units=units, verbose=verbose) + # compute model units + self._units, self._equation_params = self.model_units( + self.params, + verbose=self.verbose, + comm=self.comm_world, + ) - for _, species in self.particle_species.items(): - assert isinstance(species, ParticleSpecies) - species.setup_equation_params(units=units, verbose=verbose) + # create domain, equilibrium + self._domain, self._equil = setup_domain_and_equil( + params, + units=self.units, + ) - def setup_domain_and_equil(self, domain: Domain, equil: FluidEquilibrium): - """If a numerical equilibirum is used, the domain is taken from this equilibirum.""" - if equil is not None: - self._equil = equil - if "Numerical" in self.equil.__class__.__name__: - self._domain = self.equil.domain - else: - self._domain = domain - self._equil.domain = domain - else: - self._domain = domain - self._equil = None + if self.rank_world == 0 and self.verbose: + print("\nTIME:") + print( + f"time step:".ljust(25), + "{0} ({1:4.2e} s)".format( + params["time"]["dt"], + params["time"]["dt"] * self.units["t"], + ), + ) + print( + f"final time:".ljust(25), + "{0} ({1:4.2e} s)".format( + params["time"]["Tend"], + params["time"]["Tend"] * self.units["t"], + ), + ) + print(f"splitting algo:".ljust(25), params["time"]["split_algo"]) - if MPI.COMM_WORLD.Get_rank() == 0 and self.verbose: print("\nDOMAIN:") - print("type:".ljust(25), self.domain.__class__.__name__) + print(f"type:".ljust(25), self.domain.__class__.__name__) for key, val in self.domain.params.items(): if key not in {"cx", "cy", "cz"}: print((key + ":").ljust(25), val) print("\nFLUID BACKGROUND:") - if self.equil is not None: + if "fluid_background" in params: print("type:".ljust(25), self.equil.__class__.__name__) for key, val in self.equil.params.items(): print((key + ":").ljust(25), val) else: print("None.") - ## species - - @property - def field_species(self) -> dict: - if not hasattr(self, "_field_species"): - self._field_species = {} - for k, v in self.__dict__.items(): - if isinstance(v, FieldSpecies): - self._field_species[k] = v - return self._field_species - - @property - def fluid_species(self) -> dict: - if not hasattr(self, "_fluid_species"): - self._fluid_species = {} - for k, v in self.__dict__.items(): - if isinstance(v, FluidSpecies): - self._fluid_species[k] = v - return self._fluid_species - - @property - def particle_species(self) -> dict: - if not hasattr(self, "_particle_species"): - self._particle_species = {} - for k, v in self.__dict__.items(): - if isinstance(v, ParticleSpecies): - self._particle_species[k] = v - return self._particle_species - - @property - def diagnostic_species(self) -> 
dict: - if not hasattr(self, "_diagnostic_species"): - self._diagnostic_species = {} - for k, v in self.__dict__.items(): - if isinstance(v, DiagnosticSpecies): - self._diagnostic_species[k] = v - return self._diagnostic_species - - @property - def species(self): - if not hasattr(self, "_species"): - self._species = self.field_species | self.fluid_species | self.particle_species - return self._species - - ## allocate methods - - def allocate_feec(self, grid: TensorProductGrid, derham_opts: DerhamOptions): # create discrete derham sequence - if self.clone_config is None: - derham_comm = MPI.COMM_WORLD - else: - derham_comm = self.clone_config.sub_comm + if "grid" in params: + dims_mask = params["grid"]["dims_mask"] + if dims_mask is None: + dims_mask = [True] * 3 + + if clone_config is None: + derham_comm = self.comm_world + else: + derham_comm = clone_config.sub_comm - if grid is None or derham_opts is None: - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\n{grid =}, {derham_opts =}: no Derham object set up.") - self._derham = None - else: self._derham = setup_derham( - grid, - derham_opts, + params["grid"], comm=derham_comm, domain=self.domain, + mpi_dims_mask=dims_mask, verbose=self.verbose, ) - - # create weighted mass and basis operators - if self.derham is None: - self._mass_ops = None - self._basis_ops = None else: - self._mass_ops = WeightedMassOperators( - self.derham, - self.domain, - verbose=self.verbose, - eq_mhd=self.equil, - ) - - self._basis_ops = BasisProjectionOperators( - self.derham, - self.domain, - verbose=self.verbose, - eq_mhd=self.equil, - ) + self._derham = None + print("\nDERHAM:\nMeshless simulation - no Derham complex set up.") - # create projected equilibrium - if self.derham is None: - self._projected_equil = None - else: + self._projected_equil = None + self._mass_ops = None + if self.derham is not None: + # create projected equilibrium if isinstance(self.equil, MHDequilibrium): self._projected_equil = ProjectedMHDequilibrium( self.equil, @@ -223,32 +165,99 @@ def allocate_feec(self, grid: TensorProductGrid, derham_opts: DerhamOptions): self.equil, self.derham, ) - else: - self._projected_equil = None - def allocate_propagators(self): + # create weighted mass operators + self._mass_ops = WeightedMassOperators( + self.derham, + self.domain, + verbose=self.verbose, + eq_mhd=self.equil, + ) + + # allocate memory for variables + self._pointer = {} + self._allocate_variables() + + # store plasma parameters + if self.rank_world == 0: + self._pparams = self._compute_plasma_params(verbose=self.verbose) + else: + self._pparams = self._compute_plasma_params(verbose=False) + + # if self.rank_world == 0: + # self._show_chosen_options() + # set propagators base class attributes (then available to all propagators) Propagator.derham = self.derham Propagator.domain = self.domain if self.derham is not None: Propagator.mass_ops = self.mass_ops - Propagator.basis_ops = self.basis_ops + Propagator.basis_ops = BasisProjectionOperators( + self.derham, + self.domain, + verbose=self.verbose, + eq_mhd=self.equil, + ) Propagator.projected_equil = self.projected_equil - assert len(self.prop_list) > 0, "No propagators in this model, check the model class." 
- for prop in self.prop_list: - assert isinstance(prop, Propagator) - prop.allocate() - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nAllocated propagator '{prop.__class__.__name__}'.") + # create dummy lists/dicts to be filled by the sub-class + self._propagators = [] + self._kwargs = {} + self._scalar_quantities = {} + + return params + + @staticmethod + @abstractmethod + def species(): + """Species dictionary of the form {'em_fields': {}, 'fluid': {}, 'kinetic': {}}. + + The dynamical fields and kinetic species of the model. + + Keys of the three sub-dicts are either: + + a) the electromagnetic field/potential names (b_field, e_field) + b) the fluid species names (e.g. mhd) + c) the names of the kinetic species (e.g. electrons, energetic_ions) + + Corresponding values are: + + a) a space ID ("H1", "Hcurl", "Hdiv", "L2" or "H1vec"), + b) a dict with key=variable_name (e.g. n, U, p, ...) and value=space ID ("H1", "Hcurl", "Hdiv", "L2" or "H1vec"), + c) the type of particles ("Particles6D", "Particles5D", ...).""" + pass + + @staticmethod + @abstractmethod + def bulk_species(): + """Name of the bulk species of the plasma. Must be a key of self.fluid or self.kinetic, or None.""" + pass + + @staticmethod + @abstractmethod + def velocity_scale(): + """String that sets the velocity scale unit of the model. + Must be one of "alfvén", "cyclotron" or "light".""" + pass @staticmethod def diagnostics_dct(): """Diagnostics dictionary. Model specific variables (FemField) which is going to be saved during the simulation. """ + pass - ## basic properties + @staticmethod + @abstractmethod + def propagators_dct(): + """Dictionary holding the propagators of the model in the sequence they should be called. + Keys are the propagator classes and values are lists holding variable names (str) updated by the propagator.""" + pass + + @abstractmethod + def update_scalar_quantities(self): + """Specify an update rule for each item in ``scalar_quantities`` using :meth:`update_scalar`.""" + pass @property def params(self): @@ -265,15 +274,48 @@ def equation_params(self): """Parameters appearing in model equation due to Struphy normalization.""" return self._equation_params + @property + def comm_world(self): + """MPI_COMM_WORLD communicator.""" + return self._comm_world + + @property + def rank_world(self): + """Global rank.""" + return self._rank_world + @property def clone_config(self): """Config in case domain clones are used.""" return self._clone_config - @clone_config.setter - def clone_config(self, new): - assert isinstance(new, CloneConfig) or new is None - self._clone_config = new + @property + def pointer(self): + """Dictionary pointing to the data structures of the species (Stencil/BlockVector or "Particle" class). + + The keys are the keys from the "species" property. 
+ In case of a fluid species, the keys are like "species_variable".""" + return self._pointer + + @property + def em_fields(self): + """Dictionary of electromagnetic field/potential variables.""" + return self._em_fields + + @property + def fluid(self): + """Dictionary of fluid species.""" + return self._fluid + + @property + def kinetic(self): + """Dictionary of kinetic species.""" + return self._kinetic + + @property + def diagnostics(self): + """Dictionary of diagnostics.""" + return self._diagnostics @property def domain(self): @@ -296,32 +338,15 @@ def projected_equil(self): return self._projected_equil @property - def units(self) -> Units: + def units(self): """All Struphy units.""" return self._units - @units.setter - def units(self, new): - assert isinstance(new, Units) - self._units = new - @property def mass_ops(self): """WeighteMassOperators object, see :ref:`mass_ops`.""" return self._mass_ops - @property - def basis_ops(self): - """Basis projection operators.""" - return self._basis_ops - - @property - def prop_list(self): - """List of Propagator objects.""" - if not hasattr(self, "_prop_list"): - self._prop_list = list(self.propagators.__dict__.values()) - return self._prop_list - @property def prop_fields(self): """Module :mod:`struphy.propagators.propagators_fields`.""" @@ -337,6 +362,11 @@ def prop_markers(self): """Module :mod:`struphy.propagators.propagators_markers`.""" return self._prop_markers + @property + def propagators(self): + """A list of propagator instances for the model.""" + return self._propagators + @property def kwargs(self): """Dictionary holding the keyword arguments for each propagator specified in :attr:`~propagators_cls`. @@ -346,8 +376,6 @@ def kwargs(self): @property def scalar_quantities(self): """A dictionary of scalar quantities to be saved during the simulation.""" - if not hasattr(self, "_scalar_quantities"): - self._scalar_quantities = {} return self._scalar_quantities @property @@ -428,13 +456,13 @@ def getFromDict(dataDict, mapList): def setInDict(dataDict, mapList, value): # Loop over dicitionary and creaty empty dicts where the path does not exist for k in range(len(mapList)): - if mapList[k] not in getFromDict(dataDict, mapList[:k]).keys(): + if not mapList[k] in getFromDict(dataDict, mapList[:k]).keys(): getFromDict(dataDict, mapList[:k])[mapList[k]] = {} getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value # make sure that the base keys are top-level keys for base_key in ["em_fields", "fluid", "kinetic"]: - if base_key not in dct.keys(): + if not base_key in dct.keys(): dct[base_key] = {} if isinstance(species, str): @@ -452,7 +480,7 @@ def setInDict(dataDict, mapList, value): assert key is not None, "Must provide key if option is not a class." setInDict(dct, species + ["options"] + key, option) - def add_scalar(self, name: str, variable: PICVariable | SPHVariable = None, compute=None, summands=None): + def add_scalar(self, name, species=None, compute=None, summands=None): """ Add a scalar to be saved during the simulation. @@ -460,8 +488,8 @@ def add_scalar(self, name: str, variable: PICVariable | SPHVariable = None, comp ---------- name : str Dictionary key for the scalar. - variable : PICVariable | SPHVariable, optional - The variable associated with the scalar. Required if compute is 'from_particles'. + species : str, optional + The species associated with the scalar. Required if compute is 'from_particles'. compute : str, optional Type of scalar, determines the compute operations. Options: 'from_particles' or 'from_field'. 
Default is None. @@ -472,14 +500,14 @@ def add_scalar(self, name: str, variable: PICVariable | SPHVariable = None, comp assert isinstance(name, str), "name must be a string" if compute == "from_particles": - assert isinstance(variable, (PICVariable, SPHVariable)), f"Variable is needed when {compute =}" - - if not hasattr(self, "_scalar_quantities"): - self._scalar_quantities = {} + assert isinstance( + species, + str, + ), "species must be a string when compute is 'from_particles'" self._scalar_quantities[name] = { - "value": xp.empty(1, dtype=float), - "variable": variable, + "value": np.empty(1, dtype=float), + "species": species, "compute": compute, "summands": summands, } @@ -499,7 +527,7 @@ def update_scalar(self, name, value=None): # Ensure the name is a string assert isinstance(name, str) - variable: PICVariable | SPHVariable = self._scalar_quantities[name]["variable"] + species = self._scalar_quantities[name]["species"] summands = self._scalar_quantities[name]["summands"] compute = self._scalar_quantities[name]["compute"] @@ -524,11 +552,11 @@ def update_scalar(self, name, value=None): assert isinstance(value, float) # Create a numpy array to hold the scalar value - value_array = xp.array([value], dtype=xp.float64) + value_array = np.array([value], dtype=np.float64) # Perform MPI operations based on the compute flags - if "sum_world" in compute_operations and not isinstance(MPI, MockMPI): - MPI.COMM_WORLD.Allreduce( + if "sum_world" in compute_operations and self.comm_world is not None: + self.comm_world.Allreduce( MPI.IN_PLACE, value_array, op=MPI.SUM, @@ -562,7 +590,7 @@ def update_scalar(self, name, value=None): if "divide_n_mks" in compute_operations: # Initialize the total number of markers - n_mks_tot = xp.array([variable.particles.Np]) + n_mks_tot = np.array([self.pointer[species].Np]) value_array /= n_mks_tot # Update the scalar value @@ -584,90 +612,43 @@ def add_time_state(self, time_state): """ assert time_state.size == 1 self._time_state = time_state - for _, prop in self.propagators.__dict__.items(): - if isinstance(prop, Propagator): - prop.add_time_state(time_state) - - @profile - def allocate_variables(self, verbose: bool = False): - """ - Allocate memory for model variables and set initial conditions. - """ - # allocate memory for FE coeffs of electromagnetic fields/potentials - if self.field_species: - for species, spec in self.field_species.items(): - assert isinstance(spec, FieldSpecies) - for k, v in spec.variables.items(): - assert isinstance(v, FEECVariable) - v.allocate( - derham=self.derham, - domain=self.domain, - equil=self.equil, - ) + for prop in self.propagators: + prop.add_time_state(time_state) + + def init_propagators(self): + """Initialize the propagator objects specified in :attr:`~propagators_cls`.""" + if self.rank_world == 0 and self.verbose: + print("\nPROPAGATORS:") + for (prop, variables), (prop2, kwargs_i) in zip(self.propagators_dct().items(), self.kwargs.items()): + assert prop == prop2, ( + f'Propagators {prop} from "self.propagators_dct()" and {prop2} from "self.kwargs" must be identical !!' 
+ ) - # allocate memory for FE coeffs of fluid variables - if self.fluid_species: - for species, spec in self.fluid_species.items(): - assert isinstance(spec, FluidSpecies) - for k, v in spec.variables.items(): - assert isinstance(v, FEECVariable) - v.allocate( - derham=self.derham, - domain=self.domain, - equil=self.equil, - ) + if kwargs_i is None: + if self.rank_world == 0: + print(f'\n-> Propagator "{prop.__name__}" will not be used.') + continue + else: + if self.rank_world == 0 and self.verbose: + print(f'\n-> Initializing propagator "{prop.__name__}"') + print(f"-> for variables {variables}") + print(f"-> with the following parameters:") + for k, v in kwargs_i.items(): + if isinstance(v, StencilVector): + print(f"{k}: {repr(v)}") + else: + print(f"{k}: {v}") - # allocate memory for marker arrays of kinetic variables - if self.particle_species: - for species, spec in self.particle_species.items(): - assert isinstance(spec, ParticleSpecies) - for k, v in spec.variables.items(): - if isinstance(v, PICVariable): - v.allocate( - clone_config=self.clone_config, - derham=self.derham, - domain=self.domain, - equil=self.equil, - projected_equil=self.projected_equil, - verbose=verbose, - ) - if isinstance(v, SPHVariable): - v.allocate( - derham=self.derham, - domain=self.domain, - equil=self.equil, - projected_equil=self.projected_equil, - verbose=verbose, - ) + prop_instance = prop( + *[self.pointer[var] for var in variables], + **kwargs_i, + ) + assert isinstance(prop_instance, Propagator) + self._propagators += [prop_instance] - # allocate memory for FE coeffs of fluid variables - if self.diagnostic_species: - for species, spec in self.diagnostic_species.items(): - assert isinstance(spec, DiagnosticSpecies) - for k, v in spec.variables.items(): - assert isinstance(v, FEECVariable) - v.allocate( - derham=self.derham, - domain=self.domain, - equil=self.equil, - ) + if self.rank_world == 0 and self.verbose: + print("\nInitialization of propagators complete.") - # TODO: allocate memory for FE coeffs of diagnostics - # if self.params.diagnostic_fields is not None: - # for key, val in self.diagnostics.items(): - # if "params" in key: - # continue - # else: - # val["obj"] = self.derham.create_spline_function( - # key, - # val["space"], - # bckgr_params=None, - # pert_params=None, - # ) - - # self._pointer[key] = val["obj"].vector - - @profile def integrate(self, dt, split_algo="LieTrotter"): """ Advance the model by a time step ``dt`` by sequentially calling its Propagators. 
@@ -683,27 +664,27 @@ def integrate(self, dt, split_algo="LieTrotter"):
 
         # first order in time
         if split_algo == "LieTrotter":
-            for propagator in self.prop_list:
-                prop_name = propagator.__class__.__name__
+            for propagator in self.propagators:
+                prop_name = type(propagator).__name__
                 with ProfileManager.profile_region(prop_name):
                     propagator(dt)
 
         # second order in time
         elif split_algo == "Strang":
-            assert len(self.prop_list) > 1
+            assert len(self.propagators) > 1
 
-            for propagator in self.prop_list[:-1]:
+            for propagator in self.propagators[:-1]:
                 prop_name = type(propagator).__name__
                 with ProfileManager.profile_region(prop_name):
                     propagator(dt / 2)
 
-            propagator = self.prop_list[-1]
+            propagator = self.propagators[-1]
             prop_name = type(propagator).__name__
             with ProfileManager.profile_region(prop_name):
                 propagator(dt)
 
-            for propagator in self.prop_list[:-1][::-1]:
+            for propagator in self.propagators[:-1][::-1]:
                 prop_name = type(propagator).__name__
                 with ProfileManager.profile_region(prop_name):
                     propagator(dt / 2)
@@ -713,76 +694,90 @@ def integrate(self, dt, split_algo="LieTrotter"):
                 f"Splitting scheme {split_algo} not available.",
             )
 
-    @profile
     def update_markers_to_be_saved(self):
         """
         Writes markers with IDs that are supposed to be saved into the corresponding array.
         """
-        for name, species in self.particle_species.items():
-            assert isinstance(species, ParticleSpecies)
-            assert len(species.variables) == 1, "More than 1 variable per kinetic species is not allowed."
-            for _, var in species.variables.items():
-                assert isinstance(var, PICVariable | SPHVariable)
-                obj = var.particles
-                assert isinstance(obj, Particles)
+        from struphy.pic.base import Particles
+
+        for val in self.kinetic.values():
+            obj = val["obj"]
+            assert isinstance(obj, Particles)
+
+            # allocate array for saving markers if not present
+            if not hasattr(self, "_n_markers_saved"):
+                n_markers = val["params"]["save_data"].get("n_markers", 0)
+
+                if isinstance(n_markers, float):
+                    if n_markers > 1.0:
+                        self._n_markers_saved = int(n_markers)
+                    else:
+                        self._n_markers_saved = int(obj.n_mks_global * n_markers)
+                else:
+                    self._n_markers_saved = n_markers
 
-                if var.n_to_save > 0:
-                    markers_on_proc = xp.logical_and(
+            assert self._n_markers_saved <= obj.Np, (
+                f"The number of markers for which data should be stored (={self._n_markers_saved}) must be <= the total number of markers (={obj.Np})"
+            )
+            if self._n_markers_saved > 0:
+                val["kinetic_data"]["markers"] = np.zeros(
+                    (self._n_markers_saved, obj.markers.shape[1]),
+                    dtype=float,
+                )
+
+            if self._n_markers_saved > 0:
+                markers_on_proc = np.logical_and(
                     obj.markers[:, -1] >= 0.0,
-                    obj.markers[:, -1] < var.n_to_save,
+                    obj.markers[:, -1] < self._n_markers_saved,
                 )
-                n_markers_on_proc = xp.count_nonzero(markers_on_proc)
-                var.saved_markers[:] = -1.0
-                var.saved_markers[:n_markers_on_proc] = obj.markers[markers_on_proc]
+                n_markers_on_proc = np.count_nonzero(markers_on_proc)
+                val["kinetic_data"]["markers"][:] = -1.0
+                val["kinetic_data"]["markers"][:n_markers_on_proc] = obj.markers[markers_on_proc]
 
-    @profile
     def update_distr_functions(self):
         """
         Writes distribution function slices that are supposed to be saved into the corresponding array.
         """
+        from struphy.pic.base import Particles
+
         dim_to_int = {"e1": 0, "e2": 1, "e3": 2, "v1": 3, "v2": 4, "v3": 5}
 
-        for name, species in self.particle_species.items():
-            assert isinstance(species, ParticleSpecies)
-            assert len(species.variables) == 1, "More than 1 variable per kinetic species is not allowed."
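# A minimal, self-contained sketch of the two splitting schemes dispatched by
# `integrate` above. `ops` stands in for `self.propagators`; each entry is a
# callable `op(dt)` that advances one subsystem, as Propagator instances do.
# The toy state and operators are made up for illustration.

def lie_trotter(ops, dt):
    # first order: every operator takes one full step, in order
    for op in ops:
        op(dt)

def strang(ops, dt):
    # second order: half steps forward, one full step for the last
    # operator, then the half steps again in reverse order
    assert len(ops) > 1
    for op in ops[:-1]:
        op(dt / 2)
    ops[-1](dt)
    for op in ops[:-1][::-1]:
        op(dt / 2)

state = {"x": 0.0}
ops = [
    lambda dt: state.update(x=state["x"] + dt),      # toy subsystem A
    lambda dt: state.update(x=state["x"] + 2 * dt),  # toy subsystem B
]
strang(ops, dt=0.1)  # A(0.05), B(0.1), A(0.05) -> state["x"] == 0.3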
- for _, var in species.variables.items(): - assert isinstance(var, PICVariable | SPHVariable) - obj = var.particles - assert isinstance(obj, Particles) + for val in self.kinetic.values(): + obj = val["obj"] + assert isinstance(obj, Particles) - if obj.n_cols_diagnostics > 0: - for i in range(obj.n_cols_diagnostics): - str_dn = f"d{i + 1}" - dim_to_int[str_dn] = 3 + obj.vdim + 3 + i + if obj.n_cols_diagnostics > 0: + for i in range(obj.n_cols_diagnostics): + str_dn = f"d{i + 1}" + dim_to_int[str_dn] = 3 + obj.vdim + 3 + i - for bin_plot in species.binning_plots: - comps = bin_plot.slice.split("_") + if "f" in val["params"]["save_data"]: + for slice_i, edges in val["bin_edges"].items(): + comps = slice_i.split("_") components = [False] * (3 + obj.vdim + 3 + obj.n_cols_diagnostics) for comp in comps: components[dim_to_int[comp]] = True - edges = bin_plot.bin_edges - divide_by_jac = bin_plot.divide_by_jac - f_slice, df_slice = obj.binning(components, edges, divide_by_jac=divide_by_jac) + f_slice, df_slice = obj.binning(components, edges) - bin_plot.f[:] = f_slice - bin_plot.df[:] = df_slice + val["kinetic_data"]["f"][slice_i][:] = f_slice + val["kinetic_data"]["df"][slice_i][:] = df_slice - for kd_plot in species.kernel_density_plots: - h1 = 1 / obj.boxes_per_dim[0] - h2 = 1 / obj.boxes_per_dim[1] - h3 = 1 / obj.boxes_per_dim[2] + if "n_sph" in val["params"]["save_data"]: + h1 = 1 / obj.boxes_per_dim[0] + h2 = 1 / obj.boxes_per_dim[1] + h3 = 1 / obj.boxes_per_dim[2] - ndim = xp.count_nonzero([d > 1 for d in obj.boxes_per_dim]) - if ndim == 0: - kernel_type = "gaussian_3d" - else: - kernel_type = "gaussian_" + str(ndim) + "d" + ndim = np.count_nonzero([d > 1 for d in obj.boxes_per_dim]) + if ndim == 0: + kernel_type = "gaussian_3d" + else: + kernel_type = "gaussian_" + str(ndim) + "d" - pts = kd_plot.plot_pts + for i, pts in enumerate(val["plot_pts"]): n_sph = obj.eval_density( *pts, h1=h1, @@ -791,7 +786,7 @@ def update_distr_functions(self): kernel_type=kernel_type, fast=True, ) - kd_plot.n_sph[:] = n_sph + val["kinetic_data"]["n_sph"][i][:] = n_sph def print_scalar_quantities(self): """ @@ -800,176 +795,182 @@ def print_scalar_quantities(self): sq_str = "" for key, scalar_dict in self._scalar_quantities.items(): val = scalar_dict["value"] - assert not xp.isnan(val[0]), f"Scalar {key} is {val[0]}." + assert not np.isnan(val[0]), f"Scalar {key} is {val[0]}." sq_str += key + ": {:14.11f}".format(val[0]) + " " print(sq_str) - # def initialize_from_params(self): - # """ - # Set initial conditions for FE coefficients (electromagnetic and fluid) - # and markers according to parameter file. 
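# A short sketch of how a slice string such as "e1_v1" is turned into the
# binning setup used in `update_distr_functions`: a boolean mask over the
# phase-space directions plus per-direction bin edges. `vdim`, the bin counts
# and the ranges are illustrative values, and the extra diagnostics columns
# are omitted for brevity.
import numpy as np

dim_to_int = {"e1": 0, "e2": 1, "e3": 2, "v1": 3, "v2": 4, "v3": 5}
vdim = 3

slice_str = "e1_v1"                 # bin f in (eta1, v1)
n_bins = [16, 32]
ranges = [(0.0, 1.0), (-4.0, 4.0)]

components = [False] * (3 + vdim)   # which directions are resolved
for comp in slice_str.split("_"):
    components[dim_to_int[comp]] = True

edges = [np.linspace(lo, hi, n + 1) for (lo, hi), n in zip(ranges, n_bins)]
# bin centers, as stored in the hdf5 attributes further below
centers = [e[:-1] + (e[1] - e[0]) / 2 for e in edges]

assert (len(slice_str) - 2) // 3 + 1 == len(edges)  # dims formula from the code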
- # """ - - # # initialize em fields - # if self.field_species: - # with ProfileManager.profile_region("initialize_em_fields"): - # for key, val in self.em_fields.items(): - # if "params" in key: - # continue - # else: - # obj = val["obj"] - # assert isinstance(obj, SplineFunction) - - # obj.initialize_coeffs( - # domain=self.domain, - # bckgr_obj=self.equil, - # ) - - # if self.rank_world == 0 and self.verbose: - # print(f'\nEM field "{key}" was initialized with:') - - # _params = self.em_fields["params"] - - # if "background" in _params: - # if key in _params["background"]: - # bckgr_types = _params["background"][key] - # if bckgr_types is None: - # pass - # else: - # print("background:") - # for _type, _bp in bckgr_types.items(): - # print(" " * 4 + _type, ":") - # for _pname, _pval in _bp.items(): - # print((" " * 8 + _pname + ":").ljust(25), _pval) - # else: - # print("No background.") - # else: - # print("No background.") - - # if "perturbation" in _params: - # if key in _params["perturbation"]: - # pert_types = _params["perturbation"][key] - # if pert_types is None: - # pass - # else: - # print("perturbation:") - # for _type, _pp in pert_types.items(): - # print(" " * 4 + _type, ":") - # for _pname, _pval in _pp.items(): - # print((" " * 8 + _pname + ":").ljust(25), _pval) - # else: - # print("No perturbation.") - # else: - # print("No perturbation.") - - # if len(self.fluid) > 0: - # with ProfileManager.profile_region("initialize_fluids"): - # for species, val in self.fluid.items(): - # for variable, subval in val.items(): - # if "params" in variable: - # continue - # else: - # obj = subval["obj"] - # assert isinstance(obj, SplineFunction) - # obj.initialize_coeffs( - # domain=self.domain, - # bckgr_obj=self.equil, - # species=species, - # ) - - # if self.rank_world == 0 and self.verbose: - # print( - # f'\nFluid species "{species}" was initialized with:', - # ) - - # _params = val["params"] - - # if "background" in _params: - # for variable in val: - # if "params" in variable: - # continue - # if variable in _params["background"]: - # bckgr_types = _params["background"][variable] - # if bckgr_types is None: - # pass - # else: - # print(f"{variable} background:") - # for _type, _bp in bckgr_types.items(): - # print(" " * 4 + _type, ":") - # for _pname, _pval in _bp.items(): - # print((" " * 8 + _pname + ":").ljust(25), _pval) - # else: - # print(f"{variable}: no background.") - # else: - # print("No background.") - - # if "perturbation" in _params: - # for variable in val: - # if "params" in variable: - # continue - # if variable in _params["perturbation"]: - # pert_types = _params["perturbation"][variable] - # if pert_types is None: - # pass - # else: - # print(f"{variable} perturbation:") - # for _type, _pp in pert_types.items(): - # print(" " * 4 + _type, ":") - # for _pname, _pval in _pp.items(): - # print((" " * 8 + _pname + ":").ljust(25), _pval) - # else: - # print(f"{variable}: no perturbation.") - # else: - # print("No perturbation.") - - # # initialize particles - # if len(self.kinetic) > 0: - # with ProfileManager.profile_region("initialize_particles"): - # for species, val in self.kinetic.items(): - # obj = val["obj"] - # assert isinstance(obj, Particles) - - # if self.rank_world == 0 and self.verbose: - # _params = val["params"] - # assert "background" in _params, "Kinetic species must have background." 
- - # bckgr_types = _params["background"] - # print( - # f'\nKinetic species "{species}" was initialized with:', - # ) - # for _type, _bp in bckgr_types.items(): - # print(_type, ":") - # for _pname, _pval in _bp.items(): - # print((" " * 4 + _pname + ":").ljust(25), _pval) - - # if "perturbation" in _params: - # for variable, pert_types in _params["perturbation"].items(): - # if pert_types is None: - # pass - # else: - # print(f"{variable} perturbation:") - # for _type, _pp in pert_types.items(): - # print(" " * 4 + _type, ":") - # for _pname, _pval in _pp.items(): - # print((" " * 8 + _pname + ":").ljust(25), _pval) - # else: - # print("No perturbation.") - - # obj.draw_markers(sort=True, verbose=self.verbose) - # obj.mpi_sort_markers(do_test=True) - - # if not val["params"]["markers"]["loading"] == "restart": - # if obj.coords == "vpara_mu": - # obj.save_magnetic_moment() - - # obj.draw_markers(sort=True, verbose=self.verbose) - # if self.comm_world is not None: - # obj.mpi_sort_markers(do_test=True) - - # obj.initialize_weights( - # reject_weights=obj.weights_params["reject_weights"], - # threshold=obj.weights_params["threshold"], - # ) + def initialize_from_params(self): + """ + Set initial conditions for FE coefficients (electromagnetic and fluid) + and markers according to parameter file. + """ + + from struphy.feec.psydac_derham import Derham + from struphy.pic.base import Particles + + if self.rank_world == 0 and self.verbose: + print("\nINITIAL CONDITIONS:") + + # initialize em fields + if len(self.em_fields) > 0: + with ProfileManager.profile_region("initialize_em_fields"): + for key, val in self.em_fields.items(): + if "params" in key: + continue + else: + obj = val["obj"] + assert isinstance(obj, SplineFunction) + + obj.initialize_coeffs( + domain=self.domain, + bckgr_obj=self.equil, + ) + + if self.rank_world == 0 and self.verbose: + print(f'\nEM field "{key}" was initialized with:') + + _params = self.em_fields["params"] + + if "background" in _params: + if key in _params["background"]: + bckgr_types = _params["background"][key] + if bckgr_types is None: + pass + else: + print("background:") + for _type, _bp in bckgr_types.items(): + print(" " * 4 + _type, ":") + for _pname, _pval in _bp.items(): + print((" " * 8 + _pname + ":").ljust(25), _pval) + else: + print("No background.") + else: + print("No background.") + + if "perturbation" in _params: + if key in _params["perturbation"]: + pert_types = _params["perturbation"][key] + if pert_types is None: + pass + else: + print("perturbation:") + for _type, _pp in pert_types.items(): + print(" " * 4 + _type, ":") + for _pname, _pval in _pp.items(): + print((" " * 8 + _pname + ":").ljust(25), _pval) + else: + print("No perturbation.") + else: + print("No perturbation.") + + if len(self.fluid) > 0: + with ProfileManager.profile_region("initialize_fluids"): + for species, val in self.fluid.items(): + for variable, subval in val.items(): + if "params" in variable: + continue + else: + obj = subval["obj"] + assert isinstance(obj, SplineFunction) + obj.initialize_coeffs( + domain=self.domain, + bckgr_obj=self.equil, + species=species, + ) + + if self.rank_world == 0 and self.verbose: + print( + f'\nFluid species "{species}" was initialized with:', + ) + + _params = val["params"] + + if "background" in _params: + for variable in val: + if "params" in variable: + continue + if variable in _params["background"]: + bckgr_types = _params["background"][variable] + if bckgr_types is None: + pass + else: + print(f"{variable} background:") + 
for _type, _bp in bckgr_types.items(): + print(" " * 4 + _type, ":") + for _pname, _pval in _bp.items(): + print((" " * 8 + _pname + ":").ljust(25), _pval) + else: + print(f"{variable}: no background.") + else: + print("No background.") + + if "perturbation" in _params: + for variable in val: + if "params" in variable: + continue + if variable in _params["perturbation"]: + pert_types = _params["perturbation"][variable] + if pert_types is None: + pass + else: + print(f"{variable} perturbation:") + for _type, _pp in pert_types.items(): + print(" " * 4 + _type, ":") + for _pname, _pval in _pp.items(): + print((" " * 8 + _pname + ":").ljust(25), _pval) + else: + print(f"{variable}: no perturbation.") + else: + print("No perturbation.") + + # initialize particles + if len(self.kinetic) > 0: + with ProfileManager.profile_region("initialize_particles"): + for species, val in self.kinetic.items(): + obj = val["obj"] + assert isinstance(obj, Particles) + + if self.rank_world == 0 and self.verbose: + _params = val["params"] + assert "background" in _params, "Kinetic species must have background." + + bckgr_types = _params["background"] + print( + f'\nKinetic species "{species}" was initialized with:', + ) + for _type, _bp in bckgr_types.items(): + print(_type, ":") + for _pname, _pval in _bp.items(): + print((" " * 4 + _pname + ":").ljust(25), _pval) + + if "perturbation" in _params: + for variable, pert_types in _params["perturbation"].items(): + if pert_types is None: + pass + else: + print(f"{variable} perturbation:") + for _type, _pp in pert_types.items(): + print(" " * 4 + _type, ":") + for _pname, _pval in _pp.items(): + print((" " * 8 + _pname + ":").ljust(25), _pval) + else: + print("No perturbation.") + + obj.draw_markers(sort=True, verbose=self.verbose) + if self.comm_world is not None: + obj.mpi_sort_markers(do_test=True) + + if not val["params"]["markers"]["loading"] == "restart": + if obj.coords == "vpara_mu": + obj.save_magnetic_moment() + + if val["space"] != "ParticlesSPH" and obj.f0.coords == "constants_of_motion": + obj.save_constants_of_motion() + + obj.initialize_weights( + reject_weights=obj.weights_params["reject_weights"], + threshold=obj.weights_params["threshold"], + ) def initialize_from_restart(self, data): """ @@ -981,6 +982,9 @@ def initialize_from_restart(self, data): The data object that links to the hdf5 files. """ + from struphy.feec.psydac_derham import Derham + from struphy.pic.base import Particles + # initialize em fields if len(self.em_fields) > 0: for key, val in self.em_fields.items(): @@ -1017,7 +1021,7 @@ def initialize_from_restart(self, data): if self.comm_world is not None: obj.mpi_sort_markers(do_test=True) - def initialize_data_output(self, data: DataContainer, size): + def initialize_data_output(self, data, size): """ Create datasets in hdf5 files according to model unknowns and diagnostics data. @@ -1038,6 +1042,14 @@ def initialize_data_output(self, data: DataContainer, size): Keys of datasets which are saved at the end of a simulation to enable restarts. 
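# A hedged sketch of the hdf5 layout that `initialize_data_output` creates in
# the code below, written with raw h5py instead of Struphy's DataContainer
# (whose `add_data`/`file` calls appear in the surrounding diff). All dataset
# names and shapes here are placeholders.
import h5py
import numpy as np

with h5py.File("data_proc0.hdf5", "w") as f:
    # 'scalar/': one float per scalar quantity, updated every time step
    f.create_dataset("scalar/en_tot", data=np.empty(1, dtype=float))

    # 'feec/': FE coefficients updated every time step, spline meta data as attrs
    dset = f.create_dataset("feec/e_field", data=np.zeros(64))
    dset.attrs["space_id"] = "Hcurl"
    dset.attrs["starts"] = [0, 0, 0]

    # 'kinetic/': marker and binning data per species
    f.create_dataset("kinetic/ions/markers", data=np.zeros((10, 16)))

    # 'restart/': written only at the end of a run, to enable restarts
    f.create_dataset("restart/e_field", data=np.zeros(64))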
""" + from psydac.linalg.stencil import StencilVector + + from struphy.feec.psydac_derham import Derham + from struphy.io.output_handling import DataContainer + from struphy.pic.base import Particles + + assert isinstance(data, DataContainer) + # save scalar quantities in group 'scalar/' for key, scalar in self.scalar_quantities.items(): val = scalar["value"] @@ -1053,102 +1065,190 @@ def initialize_data_output(self, data: DataContainer, size): else: pass - # save feec data in group 'feec/' - feec_species = self.field_species | self.fluid_species | self.diagnostic_species - for species, val in feec_species.items(): - assert isinstance(val, Species) - - species_path = os.path.join("feec", species) - species_path_restart = os.path.join("restart", species) - - for variable, subval in val.variables.items(): - assert isinstance(subval, FEECVariable) - spline = subval.spline + # save electromagentic fields/potentials data in group 'feec/' + for key, val in self.em_fields.items(): + if "params" in key: + continue + else: + obj = val["obj"] + assert isinstance(obj, SplineFunction) # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! - spline.extract_coeffs(update_ghost_regions=False) + obj.extract_coeffs(update_ghost_regions=False) # save numpy array to be updated each time step. - if subval.save_data: - key_field = os.path.join(species_path, variable) + if val["save_data"]: + key_field = "feec/" + key - if isinstance(spline.vector_stencil, StencilVector): + if isinstance(obj.vector_stencil, StencilVector): data.add_data( - {key_field: spline.vector_stencil._data}, + {key_field: obj.vector_stencil._data}, ) else: for n in range(3): - key_component = os.path.join(key_field, str(n + 1)) + key_component = key_field + "/" + str(n + 1) data.add_data( - {key_component: spline.vector_stencil[n]._data}, + {key_component: obj.vector_stencil[n]._data}, ) # save field meta data - data.file[key_field].attrs["space_id"] = spline.space_id - data.file[key_field].attrs["starts"] = spline.starts - data.file[key_field].attrs["ends"] = spline.ends - data.file[key_field].attrs["pads"] = spline.pads + data.file[key_field].attrs["space_id"] = obj.space_id + data.file[key_field].attrs["starts"] = obj.starts + data.file[key_field].attrs["ends"] = obj.ends + data.file[key_field].attrs["pads"] = obj.pads # save numpy array to be updated only at the end of the simulation for restart. - key_field_restart = os.path.join(species_path_restart, variable) + key_field_restart = "restart/" + key - if isinstance(spline.vector_stencil, StencilVector): + if isinstance(obj.vector_stencil, StencilVector): data.add_data( - {key_field_restart: spline.vector_stencil._data}, + {key_field_restart: obj.vector_stencil._data}, ) else: for n in range(3): - key_component_restart = os.path.join(key_field_restart, str(n + 1)) + key_component_restart = key_field_restart + "/" + str(n + 1) data.add_data( - {key_component_restart: spline.vector_stencil[n]._data}, + {key_component_restart: obj.vector_stencil[n]._data}, ) + # save fluid data in group 'feec/' + for species, val in self.fluid.items(): + species_path = "feec/" + species + "_" + species_path_restart = "restart/" + species + "_" + + for variable, subval in val.items(): + if "params" in variable: + continue + else: + obj = subval["obj"] + assert isinstance(obj, SplineFunction) + + # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! 
+ obj.extract_coeffs(update_ghost_regions=False) + + # save numpy array to be updated each time step. + if subval["save_data"]: + key_field = species_path + variable + + if isinstance(obj.vector_stencil, StencilVector): + data.add_data( + {key_field: obj.vector_stencil._data}, + ) + + else: + for n in range(3): + key_component = key_field + "/" + str(n + 1) + data.add_data( + {key_component: obj.vector_stencil[n]._data}, + ) + + # save field meta data + data.file[key_field].attrs["space_id"] = obj.space_id + data.file[key_field].attrs["starts"] = obj.starts + data.file[key_field].attrs["ends"] = obj.ends + data.file[key_field].attrs["pads"] = obj.pads + + # save numpy array to be updated only at the end of the simulation for restart. + key_field_restart = species_path_restart + variable + + if isinstance(obj.vector_stencil, StencilVector): + data.add_data( + {key_field_restart: obj.vector_stencil._data}, + ) + else: + for n in range(3): + key_component_restart = key_field_restart + "/" + str(n + 1) + data.add_data( + {key_component_restart: obj.vector_stencil[n]._data}, + ) + # save kinetic data in group 'kinetic/' - for name, species in self.particle_species.items(): - assert isinstance(species, ParticleSpecies) - assert len(species.variables) == 1, "More than 1 variable per kinetic species is not allowed." - for varname, var in species.variables.items(): - assert isinstance(var, PICVariable | SPHVariable) - obj = var.particles - assert isinstance(obj, Particles) + for key, val in self.kinetic.items(): + obj = val["obj"] + assert isinstance(obj, Particles) + + key_spec = "kinetic/" + key + key_spec_restart = "restart/" + key + + data.add_data({key_spec_restart: obj._markers}) + + for key1, val1 in val["kinetic_data"].items(): + key_dat = key_spec + "/" + key1 + + # case of "f" and "df" + if isinstance(val1, dict): + for key2, val2 in val1.items(): + key_f = key_dat + "/" + key2 + data.add_data({key_f: val2}) - key_spec = os.path.join("kinetic", name) - key_spec_restart = os.path.join("restart", name) + dims = (len(key2) - 2) // 3 + 1 + for dim in range(dims): + data.file[key_f].attrs["bin_centers" + "_" + str(dim + 1)] = ( + val["bin_edges"][key2][dim][:-1] + + (val["bin_edges"][key2][dim][1] - val["bin_edges"][key2][dim][0]) / 2 + ) + # case of "n_sph" + elif isinstance(val1, list): + for i, v1 in enumerate(val1): + key_n = key_dat + "/view_" + str(i) + data.add_data({key_n: v1}) + # save 1d point values, not meshgrids, because attrs size is limited + eta1 = val["plot_pts"][i][0][:, 0, 0] + eta2 = val["plot_pts"][i][1][0, :, 0] + eta3 = val["plot_pts"][i][2][0, 0, :] + data.file[key_n].attrs["eta1"] = eta1 + data.file[key_n].attrs["eta2"] = eta2 + data.file[key_n].attrs["eta3"] = eta3 + else: + data.add_data({key_dat: val1}) - # restart data - data.add_data({key_spec_restart: obj.markers}) + # save diagnostics data in group 'feec/' + for key, val in self.diagnostics.items(): + if "params" in key: + continue + else: + obj = val["obj"] + assert isinstance(obj, SplineFunction) - # marker data - key_mks = os.path.join(key_spec, "markers") - data.add_data({key_mks: var.saved_markers}) + # in-place extraction of FEM coefficients from field.vector --> field.vector_stencil! + obj.extract_coeffs(update_ghost_regions=False) - # binning plot data - for bin_plot in species.binning_plots: - key_f = os.path.join(key_spec, "f", bin_plot.slice) - key_df = os.path.join(key_spec, "df", bin_plot.slice) + # save numpy array to be updated each time step. 
+ if val["save_data"]: + key_field = "feec/" + key - data.add_data({key_f: bin_plot.f}) - data.add_data({key_df: bin_plot.df}) + if isinstance(obj.vector_stencil, StencilVector): + data.add_data( + {key_field: obj.vector_stencil._data}, + ) - for dim, be in enumerate(bin_plot.bin_edges): - data.file[key_f].attrs["bin_centers" + "_" + str(dim + 1)] = be[:-1] + (be[1] - be[0]) / 2 + else: + for n in range(3): + key_component = key_field + "/" + str(n + 1) + data.add_data( + {key_component: obj.vector_stencil[n]._data}, + ) - for i, kd_plot in enumerate(species.kernel_density_plots): - key_n = os.path.join(key_spec, "n_sph", f"view_{i}") + # save field meta data + data.file[key_field].attrs["space_id"] = obj.space_id + data.file[key_field].attrs["starts"] = obj.starts + data.file[key_field].attrs["ends"] = obj.ends + data.file[key_field].attrs["pads"] = obj.pads - data.add_data({key_n: kd_plot.n_sph}) - # save 1d point values, not meshgrids, because attrs size is limited - eta1 = kd_plot.plot_pts[0][:, 0, 0] - eta2 = kd_plot.plot_pts[1][0, :, 0] - eta3 = kd_plot.plot_pts[2][0, 0, :] - data.file[key_n].attrs["eta1"] = eta1 - data.file[key_n].attrs["eta2"] = eta2 - data.file[key_n].attrs["eta3"] = eta3 + # save numpy array to be updated only at the end of the simulation for restart. + key_field_restart = "restart/" + key - # TODO: maybe add other data - # else: - # data.add_data({key_dat: val1}) + if isinstance(obj.vector_stencil, StencilVector): + data.add_data( + {key_field_restart: obj.vector_stencil._data}, + ) + else: + for n in range(3): + key_component_restart = key_field_restart + "/" + str(n + 1) + data.add_data( + {key_component_restart: obj.vector_stencil[n]._data}, + ) # keys to be saved at each time step and only at end (restart) save_keys_all = [] @@ -1166,13 +1266,158 @@ def initialize_data_output(self, data: DataContainer, size): # Class methods : ################### + @classmethod + def model_units(cls, params, verbose=False, comm=None): + """ + Return model units and print them to screen. + + Parameters + ---------- + params : dict + model parameters. + + verbose : bool, optional + print model units to screen. + + comm : obj + MPI communicator. + + Returns + ------- + units_basic : dict + Basic units for time, length, mass and magnetic field. + + units_der : dict + Derived units for velocity, pressure, mass density and particle density. 
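# A worked numerical sketch of the per-species normalization parameters
# computed in `model_units` below; the formulas are the same as in the code,
# while the input values (Z, A and the units n, B, t) are made up.
import numpy as np

e = 1.602176634e-19      # elementary charge (C)
mH = 1.67262192369e-27   # proton mass (kg)
eps0 = 8.8541878128e-12  # vacuum permittivity (F/m)

Z, A = 1, 1                  # hydrogen bulk species
n, B, t = 1e19, 1.0, 1e-6    # example units: m^-3, T, s

om_p = np.sqrt(n * (Z * e) ** 2 / (eps0 * A * mH))  # plasma frequency (rad/s)
om_c = Z * e * B / (A * mH)                         # cyclotron frequency (rad/s)

alpha = om_p / om_c          # ~4.4e1 for these inputs
epsilon = 1.0 / (om_c * t)
kappa = om_p * t
print(f"alpha: {alpha:4.3e}, epsilon: {epsilon:4.3e}, kappa: {kappa:4.3e}")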
+ """ + + from struphy.io.setup import derive_units + + if comm is None: + rank = 0 + else: + rank = comm.Get_rank() + + # look for bulk species in fluid OR kinetic parameter dictionaries + Z_bulk = None + A_bulk = None + if "fluid" in params: + if cls.bulk_species() in params["fluid"]: + Z_bulk = params["fluid"][cls.bulk_species()]["phys_params"]["Z"] + A_bulk = params["fluid"][cls.bulk_species()]["phys_params"]["A"] + if "kinetic" in params: + if cls.bulk_species() in params["kinetic"]: + Z_bulk = params["kinetic"][cls.bulk_species()]["phys_params"]["Z"] + A_bulk = params["kinetic"][cls.bulk_species()]["phys_params"]["A"] + + # compute model units + if "kBT" in params["units"]: + kBT = params["units"]["kBT"] + else: + kBT = None + + units = derive_units( + Z_bulk=Z_bulk, + A_bulk=A_bulk, + x=params["units"]["x"], + B=params["units"]["B"], + n=params["units"]["n"], + kBT=kBT, + velocity_scale=cls.velocity_scale(), + ) + + # print to screen + if verbose and rank == 0: + print("\nUNITS:") + print( + f"Unit of length:".ljust(25), + "{:4.3e}".format(units["x"]) + " m", + ) + print( + f"Unit of time:".ljust(25), + "{:4.3e}".format(units["t"]) + " s", + ) + print( + f"Unit of velocity:".ljust(25), + "{:4.3e}".format(units["v"]) + " m/s", + ) + print( + f"Unit of magnetic field:".ljust(25), + "{:4.3e}".format(units["B"]) + " T", + ) + + if A_bulk is not None: + print( + f"Unit of particle density:".ljust(25), + "{:4.3e}".format(units["n"]) + " m⁻³", + ) + print( + f"Unit of mass density:".ljust(25), + "{:4.3e}".format(units["rho"]) + " kg/m³", + ) + print( + f"Unit of pressure:".ljust(25), + "{:4.3e}".format(units["p"] * 1e-5) + " bar", + ) + print( + f"Unit of current density:".ljust(25), + "{:4.3e}".format(units["j"]) + " A/m²", + ) + + # compute equation parameters for each species + e = 1.602176634e-19 # elementary charge (C) + mH = 1.67262192369e-27 # proton mass (kg) + eps0 = 8.8541878128e-12 # vacuum permittivity (F/m) + + equation_params = {} + if "fluid" in params: + for species in params["fluid"]: + Z = params["fluid"][species]["phys_params"]["Z"] + A = params["fluid"][species]["phys_params"]["A"] + + # compute equation parameters + om_p = np.sqrt(units["n"] * (Z * e) ** 2 / (eps0 * A * mH)) + om_c = Z * e * units["B"] / (A * mH) + equation_params[species] = {} + equation_params[species]["alpha"] = om_p / om_c + equation_params[species]["epsilon"] = 1.0 / (om_c * units["t"]) + equation_params[species]["kappa"] = om_p * units["t"] + + if verbose and rank == 0: + print("\nNORMALIZATION PARAMETERS:") + print("- " + species + ":") + for key, val in equation_params[species].items(): + print((key + ":").ljust(25), "{:4.3e}".format(val)) + + if "kinetic" in params: + for species in params["kinetic"]: + Z = params["kinetic"][species]["phys_params"]["Z"] + A = params["kinetic"][species]["phys_params"]["A"] + + # compute equation parameters + om_p = np.sqrt(units["n"] * (Z * e) ** 2 / (eps0 * A * mH)) + om_c = Z * e * units["B"] / (A * mH) + equation_params[species] = {} + equation_params[species]["alpha"] = om_p / om_c + equation_params[species]["epsilon"] = 1.0 / (om_c * units["t"]) + equation_params[species]["kappa"] = om_p * units["t"] + + if verbose and rank == 0: + if "fluid" not in params: + print("\nNORMALIZATION PARAMETERS:") + print("- " + species + ":") + for key, val in equation_params[species].items(): + print((key + ":").ljust(25), "{:4.3e}".format(val)) + + return units, equation_params + @classmethod def show_options(cls): """Print available model options to screen.""" print( 
'Options are given under the keyword "options" for each species dict. \ -Available options stand in lists as dict values.\nThe first entry of a list denotes the default value.', +Available options stand in lists as dict values.\nThe first entry of a list denotes the default value.' ) tab = " " @@ -1233,6 +1478,7 @@ def write_parameters_to_file(cls, parameters=None, file=None, save=True, prompt= import yaml + import struphy import struphy.utils.utils as utils # Read struphy state file @@ -1261,9 +1507,11 @@ def write_parameters_to_file(cls, parameters=None, file=None, save=True, prompt= else: pass + @classmethod def generate_default_parameter_file( - self, - path: str = None, + cls, + file: str = None, + save: bool = True, prompt: bool = True, ): """Generate a parameter file with default options for each species, @@ -1273,235 +1521,436 @@ def generate_default_parameter_file( Parameters ---------- - path : str - Alternative path to getcwd()/params_MODEL.py. + file : str + Alternative filename to params_.yml. + + save : bool + Whether to save the parameter file in the current input path. prompt : bool Whether to prompt for overwriting the specified .yml file. Returns ------- - params_path : str - The path of the parameter file. - """ + The default parameter dictionary.""" - if path is None: - path = os.path.join(os.getcwd(), f"params_{self.__class__.__name__}.py") + import os - # create new default file - try: - file = open(path, "x") - except FileExistsError: - if not prompt: - yn = "Y" - else: - yn = input(f"\nFile {path} exists, overwrite (Y/n)? ") - if yn in ("", "Y", "y", "yes", "Yes"): - file = open(path, "w") - else: - print("exiting ...") - exit() - except FileNotFoundError: - folder = os.path.join("/", *path.split("/")[:-1]) - if not prompt: - yn = "Y" - else: - yn = input(f"\nFolder {folder} does not exist, create (Y/n)? 
") - if yn in ("", "Y", "y", "yes", "Yes"): - os.makedirs(folder) - file = open(path, "x") - else: - print("exiting ...") - exit() - - file.write("from struphy.io.options import EnvironmentOptions, BaseUnits, Time\n") - file.write("from struphy.geometry import domains\n") - file.write("from struphy.fields_background import equils\n") - - species_params = "\n# species parameters\n" - particle_params = "" - has_plasma = False - has_feec = False - has_pic = False - has_sph = False - for sn, species in self.species.items(): - assert isinstance(species, Species) - - if isinstance(species, (FluidSpecies, ParticleSpecies)): - has_plasma = True - species_params += f"model.{sn}.set_phys_params()\n" - if isinstance(species, ParticleSpecies): - particle_params += "\nloading_params = LoadingParameters()\n" - particle_params += "weights_params = WeightsParameters()\n" - particle_params += "boundary_params = BoundaryParameters()\n" - particle_params += f"model.{sn}.set_markers(loading_params=loading_params,\n" - txt = "weights_params=weights_params,\n" - particle_params += indent(txt, " " * len(f"model.{sn}.set_markers(")) - txt = "boundary_params=boundary_params,\n" - particle_params += indent(txt, " " * len(f"model.{sn}.set_markers(")) - txt = ")\n" - particle_params += indent(txt, " " * len(f"model.{sn}.set_markers(")) - particle_params += f"model.{sn}.set_sorting_boxes()\n" - particle_params += f"model.{sn}.set_save_data()\n" - - for vn, var in species.variables.items(): - if isinstance(var, FEECVariable): - has_feec = True - if var.space in ("H1", "L2"): - init_bckgr_feec = f"model.{sn}.{vn}.add_background(FieldsBackground())\n" - init_pert_feec = f"model.{sn}.{vn}.add_perturbation(perturbations.TorusModesCos())\n" - else: - init_bckgr_feec = f"model.{sn}.{vn}.add_background(FieldsBackground())\n" - init_pert_feec = ( - f"model.{sn}.{vn}.add_perturbation(perturbations.TorusModesCos(given_in_basis='v', comp=0))\n\ -model.{sn}.{vn}.add_perturbation(perturbations.TorusModesCos(given_in_basis='v', comp=1))\n\ -model.{sn}.{vn}.add_perturbation(perturbations.TorusModesCos(given_in_basis='v', comp=2))\n" - ) + import yaml + + import struphy + from struphy.io.setup import descend_options_dict + + libpath = struphy.__path__[0] + + # load a standard parameter file + with open(os.path.join(libpath, "io/inp/parameters.yml")) as tmp: + parameters = yaml.load(tmp, Loader=yaml.FullLoader) + + parameters["model"] = cls.__name__ + + # extract default em_fields parameters + bckgr_params_1_em = parameters["em_fields"]["background"]["var_1"] + bckgr_params_2_em = parameters["em_fields"]["background"]["var_2"] + parameters["em_fields"].pop("background") + + pert_params_1_em = parameters["em_fields"]["perturbation"]["var_1"] + pert_params_2_em = parameters["em_fields"]["perturbation"]["var_2"] + parameters["em_fields"].pop("perturbation") - elif isinstance(var, PICVariable): - has_pic = True - init_pert_pic = ( - "\n# if .add_initial_condition is not called, the background is the kinetic initial condition\n" + # extract default fluid parameters + bckgr_params_1_fluid = parameters["fluid"]["species_name"]["background"]["var_1"] + bckgr_params_2_fluid = parameters["fluid"]["species_name"]["background"]["var_2"] + parameters["fluid"]["species_name"].pop("background") + + pert_params_1_fluid = parameters["fluid"]["species_name"]["perturbation"]["var_1"] + pert_params_2_fluid = parameters["fluid"]["species_name"]["perturbation"]["var_2"] + parameters["fluid"]["species_name"].pop("perturbation") + + # standard Maxwellians + 
parameters["kinetic"]["species_name"].pop("background") + maxw_name = { + "6D": "Maxwellian3D", + "5D": "GyroMaxwellian2D", + "4D": "Maxwellian1D", + "3D": "ColdPlasma", + "PH": "ConstantVelocity", + } + + # init options dicts + d_opts = {"em_fields": [], "fluid": {}, "kinetic": {}} + + # set the correct names in the parameter file + if len(cls.species()["em_fields"]) > 0: + parameters["em_fields"]["background"] = {} + parameters["em_fields"]["perturbation"] = {} + for name, space in cls.species()["em_fields"].items(): + if space in {"H1", "L2"}: + parameters["em_fields"]["background"][name] = bckgr_params_1_em + parameters["em_fields"]["perturbation"][name] = pert_params_1_em + elif space in {"Hcurl", "Hdiv", "H1vec"}: + parameters["em_fields"]["background"][name] = bckgr_params_2_em + parameters["em_fields"]["perturbation"][name] = pert_params_2_em + else: + parameters.pop("em_fields") + + # find out the default em_fields options of the model + if "options" in cls.options()["em_fields"]: + # create the default options parameters + d_default = descend_options_dict( + cls.options()["em_fields"]["options"], + d_opts["em_fields"], + ) + parameters["em_fields"]["options"] = d_default + + # fluid + fluid_params = parameters["fluid"].pop("species_name") + + if len(cls.species()["fluid"]) > 0: + for name, dct in cls.species()["fluid"].items(): + parameters["fluid"][name] = fluid_params + parameters["fluid"][name]["background"] = {} + parameters["fluid"][name]["perturbation"] = {} + + # find out the default fluid options of the model + if name in cls.options()["fluid"]: + d_opts["fluid"][name] = [] + + # create the default options parameters + d_default = descend_options_dict( + cls.options()["fluid"][name]["options"], + d_opts["fluid"][name], ) - init_pert_pic += "perturbation = perturbations.TorusModesCos()\n" - if "6D" in var.space: - init_bckgr_pic = "maxwellian_1 = maxwellians.Maxwellian3D(n=(1.0, None))\n" - init_bckgr_pic += "maxwellian_2 = maxwellians.Maxwellian3D(n=(0.1, None))\n" - init_pert_pic += "maxwellian_1pt = maxwellians.Maxwellian3D(n=(1.0, perturbation))\n" - init_pert_pic += "init = maxwellian_1pt + maxwellian_2\n" - init_pert_pic += f"model.{sn}.{vn}.add_initial_condition(init)\n" - elif "5D" in var.space: - init_bckgr_pic = "maxwellian_1 = maxwellians.GyroMaxwellian2D(n=(1.0, None), equil=equil)\n" - init_bckgr_pic += "maxwellian_2 = maxwellians.GyroMaxwellian2D(n=(0.1, None), equil=equil)\n" - init_pert_pic += ( - "maxwellian_1pt = maxwellians.GyroMaxwellian2D(n=(1.0, perturbation), equil=equil)\n" - ) - init_pert_pic += "init = maxwellian_1pt + maxwellian_2\n" - init_pert_pic += f"model.{sn}.{vn}.add_initial_condition(init)\n" - if "3D" in var.space: - init_bckgr_pic = "maxwellian_1 = maxwellians.ColdPlasma(n=(1.0, None))\n" - init_bckgr_pic += "maxwellian_2 = maxwellians.ColdPlasma(n=(0.1, None))\n" - init_pert_pic += "maxwellian_1pt = maxwellians.ColdPlasma(n=(1.0, perturbation))\n" - init_pert_pic += "init = maxwellian_1pt + maxwellian_2\n" - init_pert_pic += f"model.{sn}.{vn}.add_initial_condition(init)\n" - init_bckgr_pic += "background = maxwellian_1 + maxwellian_2\n" - init_bckgr_pic += f"model.{sn}.{vn}.add_background(background)\n" - - exclude = "# model.....save_data = False\n" - - elif isinstance(var, SPHVariable): - has_sph = True - init_bckgr_sph = "background = equils.ConstantVelocity()\n" - init_bckgr_sph += f"model.{sn}.{vn}.add_background(background)\n" - init_pert_sph = "perturbation = perturbations.TorusModesCos()\n" - init_pert_sph += 
f"model.{sn}.{vn}.add_perturbation(del_n=perturbation)\n" - exclude = f"# model.{sn}.{vn}.save_data = False\n" - - file.write("from struphy.topology import grids\n") - file.write("from struphy.io.options import DerhamOptions\n") - file.write("from struphy.io.options import FieldsBackground\n") - file.write("from struphy.initial import perturbations\n") - - file.write("from struphy.kinetic_background import maxwellians\n") - file.write( - "from struphy.pic.utilities import (LoadingParameters,\n\ - WeightsParameters,\n\ - BoundaryParameters,\n\ - BinningPlot,\n\ - KernelDensityPlot,\n\ - )\n", - ) - file.write("from struphy import main\n") - - file.write("\n# import model, set verbosity\n") - file.write(f"from {self.__module__} import {self.__class__.__name__}\n") - - file.write("\n# environment options\n") - file.write("env = EnvironmentOptions()\n") - - file.write("\n# units\n") - file.write("base_units = BaseUnits()\n") - - file.write("\n# time stepping\n") - file.write("time_opts = Time()\n") - - file.write("\n# geometry\n") - file.write("domain = domains.Cuboid()\n") - - file.write("\n# fluid equilibrium (can be used as part of initial conditions)\n") - file.write("equil = equils.HomogenSlab()\n") - - # if has_feec: - grid = "grid = grids.TensorProductGrid()\n" - derham = "derham_opts = DerhamOptions()\n" - # else: - # grid = "grid = None\n" - # derham = "derham_opts = None\n" - - file.write("\n# grid\n") - file.write(grid) - - file.write("\n# derham options\n") - file.write(derham) - - file.write("\n# light-weight model instance\n") - file.write(f"model = {self.__class__.__name__}()\n") - - if has_plasma: - file.write(species_params) - - if has_pic or has_sph: - file.write(particle_params) - - file.write("\n# propagator options\n") - for prop in self.propagators.__dict__: - file.write(f"model.propagators.{prop}.options = model.propagators.{prop}.Options()\n") - - file.write("\n# background, perturbations and initial conditions\n") - if has_feec: - file.write(init_bckgr_feec) - file.write(init_pert_feec) - if has_pic: - file.write(init_bckgr_pic) - file.write(init_pert_pic) - if has_sph: - file.write(init_bckgr_sph) - file.write(init_pert_sph) - - file.write("\n# optional: exclude variables from saving\n") - file.write(exclude) - - file.write('\nif __name__ == "__main__":\n') - file.write(" # start run\n") - file.write(" verbose = True\n\n") - file.write( - " main.run(model,\n\ - params_path=__file__,\n\ - env=env,\n\ - base_units=base_units,\n\ - time_opts=time_opts,\n\ - domain=domain,\n\ - equil=equil,\n\ - grid=grid,\n\ - derham_opts=derham_opts,\n\ - verbose=verbose,\n\ - )", - ) - file.close() + parameters["fluid"][name]["options"] = d_default - print( - f"\nDefault parameter file for '{self.__class__.__name__}' has been created in the cwd ({path}).\n\ -You can now launch a simulation with 'python params_{self.__class__.__name__}.py'", + # set the correct names parameter file + for sub_name, space in dct.items(): + if space in {"H1", "L2"}: + parameters["fluid"][name]["background"][sub_name] = bckgr_params_1_fluid + parameters["fluid"][name]["perturbation"][sub_name] = pert_params_1_fluid + elif space in {"Hcurl", "Hdiv", "H1vec"}: + parameters["fluid"][name]["background"][sub_name] = bckgr_params_2_fluid + parameters["fluid"][name]["perturbation"][sub_name] = pert_params_2_fluid + else: + parameters.pop("fluid") + + # kinetic + kinetic_params = parameters["kinetic"].pop("species_name") + + if len(cls.species()["kinetic"]) > 0: + parameters["kinetic"] = {} + + for name, kind in 
cls.species()["kinetic"].items(): + parameters["kinetic"][name] = kinetic_params + + # find out the default kinetic options of the model + if name in cls.options()["kinetic"]: + d_opts["kinetic"][name] = [] + + # create the default options parameters + d_default = descend_options_dict( + cls.options()["kinetic"][name]["options"], + d_opts["kinetic"][name], + ) + + parameters["kinetic"][name]["options"] = d_default + + # set the background + dim = kind[-2:] + parameters["kinetic"][name]["background"] = { + maxw_name[dim]: {"n": 0.05}, + } + else: + parameters.pop("kinetic") + + # diagnostics + if cls.diagnostics_dct() is not None: + parameters["diagnostics"] = {} + for name, space in cls.diagnostics_dct().items(): + parameters["diagnostics"][name] = {"save_data": True} + + cls.write_parameters_to_file( + parameters=parameters, + file=file, + save=save, + prompt=prompt, ) - return path + return parameters ################### # Private methods : ################### - def compute_plasma_params(self, verbose=True): + def _init_variable_dicts(self): + """ + Initialize em-fields, fluid and kinetic dictionaries for information on the model variables. + """ + + # electromagnetic fields, fluid and/or kinetic species + self._em_fields = {} + self._fluid = {} + self._kinetic = {} + self._diagnostics = {} + + if self.rank_world == 0 and self.verbose: + print("\nMODEL SPECIES:") + + # create dictionaries for each em-field/species and fill in space/class name and parameters + for var_name, space in self.species()["em_fields"].items(): + assert space in {"H1", "Hcurl", "Hdiv", "L2", "H1vec"} + assert "em_fields" in self.params, 'Top-level key "em_fields" is missing in parameter file.' + + if self.rank_world == 0 and self.verbose: + print("em_field:".ljust(25), f'"{var_name}" ({space})') + + self._em_fields[var_name] = {} + + # space + self._em_fields[var_name]["space"] = space + + # initial conditions + if "background" in self.params["em_fields"]: + self._em_fields[var_name]["background"] = self.params["em_fields"]["background"].get(var_name) + if "perturbation" in self.params["em_fields"]: + self._em_fields[var_name]["perturbation"] = self.params["em_fields"]["perturbation"].get(var_name) + + # which components to save + if "save_data" in self.params["em_fields"]: + self._em_fields[var_name]["save_data"] = self.params["em_fields"]["save_data"]["comps"][var_name] + else: + self._em_fields[var_name]["save_data"] = True + + # overall parameters + self._em_fields["params"] = self.params["em_fields"] + + for var_name, space in self.species()["fluid"].items(): + assert isinstance(space, dict) + assert "fluid" in self.params, 'Top-level key "fluid" is missing in parameter file.' + assert var_name in self.params["fluid"], f"Fluid species {var_name} is missing in parameter file." 
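# A schematic of the per-variable dictionaries built by `_init_variable_dicts`:
# the keys mirror the assignments in this method, while the variable and
# species names ("e_field", "mhd", "n3") and the values are placeholders.
em_fields = {
    "e_field": {
        "space": "Hcurl",      # FEEC space of the variable
        "background": None,    # from params["em_fields"]["background"]
        "perturbation": None,  # from params["em_fields"]["perturbation"]
        "save_data": True,     # whether/which components to save
    },
    "params": {},              # full params["em_fields"] sub-dict (elided)
}

fluid = {
    "mhd": {
        "n3": {"space": "L2", "background": None, "perturbation": None, "save_data": True},
        "params": {},          # full params["fluid"]["mhd"] sub-dict (elided)
    },
}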
+ + if self.rank_world == 0 and self.verbose: + print("fluid:".ljust(25), f'"{var_name}" ({space})') + + self._fluid[var_name] = {} + for sub_var_name, sub_space in space.items(): + self._fluid[var_name][sub_var_name] = {} + + # space + self._fluid[var_name][sub_var_name]["space"] = sub_space + + # initial conditions + if "background" in self.params["fluid"][var_name]: + self._fluid[var_name][sub_var_name]["background"] = self.params["fluid"][var_name][ + "background" + ].get(sub_var_name) + if "perturbation" in self.params["fluid"][var_name]: + self._fluid[var_name][sub_var_name]["perturbation"] = self.params["fluid"][var_name][ + "perturbation" + ].get(sub_var_name) + + # which components to save + if "save_data" in self.params["fluid"][var_name]: + self._fluid[var_name][sub_var_name]["save_data"] = self.params["fluid"][var_name]["save_data"][ + "comps" + ][sub_var_name] + + else: + self._fluid[var_name][sub_var_name]["save_data"] = True + + # overall parameters + self._fluid[var_name]["params"] = self.params["fluid"][var_name] + + for var_name, space in self.species()["kinetic"].items(): + assert "Particles" in space + assert "kinetic" in self.params, 'Top-level key "kinetic" is missing in parameter file.' + assert var_name in self.params["kinetic"], f"Kinetic species {var_name} is missing in parameter file." + + if self.rank_world == 0 and self.verbose: + print("kinetic:".ljust(25), f'"{var_name}" ({space})') + + self._kinetic[var_name] = {} + self._kinetic[var_name]["space"] = space + self._kinetic[var_name]["params"] = self.params["kinetic"][var_name] + + if self.diagnostics_dct() is not None: + for var_name, space in self.diagnostics_dct().items(): + assert space in {"H1", "Hcurl", "Hdiv", "L2", "H1vec"} + + if self.rank_world == 0 and self.verbose: + print("diagnostics:".ljust(25), f'"{var_name}" ({space})') + + self._diagnostics[var_name] = {} + self._diagnostics[var_name]["space"] = space + self._diagnostics["params"] = self.params["diagnostics"][var_name] + + # which components to save + if "save_data" in self.params["diagnostics"][var_name]: + self._diagnostics[var_name]["save_data"] = self.params["diagnostics"][var_name]["save_data"] + + else: + self._diagnostics[var_name]["save_data"] = True + + def _allocate_variables(self): + """ + Allocate memory for model variables. + Creates FEM fields for em-fields and fluid variables and a particle class for kinetic species. 
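# A minimal sketch of the `_pointer` mapping filled by `_allocate_variables`
# below: em-field names map to their coefficient vectors, fluid variables are
# keyed as "<species>_<variable>", and kinetic species map to their Particles
# object. The names and placeholder strings are illustrative only.
pointer = {}
pointer["e_field"] = "<StencilVector/BlockVector of FE coeffs>"  # em-field
pointer["mhd_n3"] = "<StencilVector of FE coeffs>"               # fluid
pointer["ions"] = "<Particles instance>"                         # kinetic

# propagators are then built on entries of this mapping, e.g.
# prop = SomePropagator(*[pointer[v] for v in ("e_field", "ions")], **kwargs)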
+ """ + + from struphy.feec.psydac_derham import Derham + from struphy.pic import particles + from struphy.pic.base import Particles + + # allocate memory for FE coeffs of electromagnetic fields/potentials + if "em_fields" in self.params: + for variable, dct in self.em_fields.items(): + if "params" in variable: + continue + else: + dct["obj"] = self.derham.create_spline_function( + variable, + dct["space"], + bckgr_params=dct.get("background"), + pert_params=dct.get("perturbation"), + ) + + self._pointer[variable] = dct["obj"].vector + + # allocate memory for FE coeffs of fluid variables + if "fluid" in self.params: + for species, dct in self.fluid.items(): + for variable, subdct in dct.items(): + if "params" in variable: + continue + else: + subdct["obj"] = self.derham.create_spline_function( + variable, + subdct["space"], + bckgr_params=subdct.get("background"), + pert_params=subdct.get("perturbation"), + ) + + self._pointer[species + "_" + variable] = subdct["obj"].vector + + # marker arrays and plasma parameters of kinetic species + if "kinetic" in self.params: + for species, val in self.kinetic.items(): + assert any([key in val["params"]["markers"] for key in ["Np", "ppc", "ppb"]]) + + bckgr_params = val["params"].get("background", None) + pert_params = val["params"].get("perturbation", None) + boxes_per_dim = val["params"].get("boxes_per_dim", None) + mpi_dims_mask = val["params"].get("dims_mask", None) + weights_params = val["params"].get("weights", None) + + if self.derham is None: + domain_decomp = None + else: + domain_array = self.derham.domain_array + nprocs = self.derham.domain_decomposition.nprocs + domain_decomp = (domain_array, nprocs) + + kinetic_class = getattr(particles, val["space"]) + # print(f"{kinetic_class = }") + val["obj"] = kinetic_class( + comm_world=self.comm_world, + clone_config=self.clone_config, + **val["params"]["markers"], + weights_params=weights_params, + domain_decomp=domain_decomp, + mpi_dims_mask=mpi_dims_mask, + boxes_per_dim=boxes_per_dim, + name=species, + equation_params=self.equation_params[species], + domain=self.domain, + equil=self.equil, + projected_equil=self.projected_equil, + bckgr_params=bckgr_params, + pert_params=pert_params, + ) + + obj = val["obj"] + assert isinstance(obj, Particles) + + self._pointer[species] = obj + + # for storing markers + val["kinetic_data"] = {} + + # for storing the distribution function + if "f" in val["params"]["save_data"]: + slices = val["params"]["save_data"]["f"]["slices"] + n_bins = val["params"]["save_data"]["f"]["n_bins"] + ranges = val["params"]["save_data"]["f"]["ranges"] + + val["kinetic_data"]["f"] = {} + val["kinetic_data"]["df"] = {} + val["bin_edges"] = {} + if len(slices) > 0: + for i, sli in enumerate(slices): + assert ((len(sli) - 2) / 3).is_integer() + assert len(slices[i].split("_")) == len(ranges[i]) == len(n_bins[i]), ( + f"Number of slices names ({len(slices[i].split('_'))}), number of bins ({len(n_bins[i])}), and number of ranges ({len(ranges[i])}) are inconsistent with each other!\n\n" + ) + val["bin_edges"][sli] = [] + dims = (len(sli) - 2) // 3 + 1 + for j in range(dims): + val["bin_edges"][sli] += [ + np.linspace( + ranges[i][j][0], + ranges[i][j][1], + n_bins[i][j] + 1, + ), + ] + val["kinetic_data"]["f"][sli] = np.zeros( + n_bins[i], + dtype=float, + ) + val["kinetic_data"]["df"][sli] = np.zeros( + n_bins[i], + dtype=float, + ) + + # for storing an sph evaluation of the density n + if "n_sph" in val["params"]["save_data"]: + plot_pts = 
val["params"]["save_data"]["n_sph"]["plot_pts"] + + val["kinetic_data"]["n_sph"] = [] + val["plot_pts"] = [] + for i, pts in enumerate(plot_pts): + assert len(pts) == 3 + eta1 = np.linspace(0.0, 1.0, pts[0]) + eta2 = np.linspace(0.0, 1.0, pts[1]) + eta3 = np.linspace(0.0, 1.0, pts[2]) + ee1, ee2, ee3 = np.meshgrid( + eta1, + eta2, + eta3, + indexing="ij", + ) + val["plot_pts"] += [(ee1, ee2, ee3)] + val["kinetic_data"]["n_sph"] += [np.zeros(ee1.shape, dtype=float)] + + # other data (wave-particle power exchange, etc.) + # TODO + + # allocate memory for FE coeffs of diagnostics + if "diagnostics" in self.params: + for key, val in self.diagnostics.items(): + if "params" in key: + continue + else: + val["obj"] = self.derham.create_spline_function( + key, + val["space"], + bckgr_params=None, + pert_params=None, + ) + + self._pointer[key] = val["obj"].vector + + def _compute_plasma_params(self, verbose=True): """ Compute and print volume averaged plasma parameters for each species of the model. @@ -1527,8 +1976,37 @@ def compute_plasma_params(self, verbose=True): - rho/L - alpha = Omega_p/Omega_c - epsilon = 1/(t*Omega_c) + + Returns + ------- + pparams : dict + Plasma parameters for each species. """ + from struphy.fields_background import equils + from struphy.fields_background.base import FluidEquilibriumWithB + from struphy.kinetic_background import maxwellians + + pparams = {} + + # physics constants + e = 1.602176634e-19 # elementary charge (C) + m_p = 1.67262192369e-27 # proton mass (kg) + mu0 = 1.25663706212e-6 # magnetic constant (N*A^-2) + eps0 = 8.8541878128e-12 # vacuum permittivity (F*m^-1) + kB = 1.380649e-23 # Boltzmann constant (J*K^-1) + + # exit when there is not any plasma species + if len(self.fluid) == 0 and len(self.kinetic) == 0: + return + + # compute model units + units, equation_params = self.model_units( + self.params, + verbose=False, + comm=self.comm_world, + ) + # units affices for printing units_affix = {} units_affix["plasma volume"] = " m³" @@ -1555,235 +2033,236 @@ def compute_plasma_params(self, verbose=True): units_affix["epsilon"] = "" h = 1 / 20 - eta1 = xp.linspace(h / 2.0, 1.0 - h / 2.0, 20) - eta2 = xp.linspace(h / 2.0, 1.0 - h / 2.0, 20) - eta3 = xp.linspace(h / 2.0, 1.0 - h / 2.0, 20) - - ## global parameters + eta1 = np.linspace(h / 2.0, 1.0 - h / 2.0, 20) + eta2 = np.linspace(h / 2.0, 1.0 - h / 2.0, 20) + eta3 = np.linspace(h / 2.0, 1.0 - h / 2.0, 20) + # global parameters # plasma volume (hat x^3) det_tmp = self.domain.jacobian_det(eta1, eta2, eta3) - vol1 = xp.mean(xp.abs(det_tmp)) + vol1 = np.mean(np.abs(det_tmp)) # plasma volume (m⁻³) - plasma_volume = vol1 * self.units.x**3 + plasma_volume = vol1 * units["x"] ** 3 # transit length (m) transit_length = plasma_volume ** (1 / 3) # magnetic field (T) if isinstance(self.equil, FluidEquilibriumWithB): B_tmp = self.equil.absB0(eta1, eta2, eta3) else: - B_tmp = xp.zeros((eta1.size, eta2.size, eta3.size)) - magnetic_field = xp.mean(B_tmp * xp.abs(det_tmp)) / vol1 * self.units.B - B_max = xp.max(B_tmp) * self.units.B - B_min = xp.min(B_tmp) * self.units.B + B_tmp = np.zeros((eta1.size, eta2.size, eta3.size)) + magnetic_field = np.mean(B_tmp * np.abs(det_tmp)) / vol1 * units["B"] + B_max = np.max(B_tmp) * units["B"] + B_min = np.min(B_tmp) * units["B"] if magnetic_field < 1e-14: - magnetic_field = xp.nan + magnetic_field = np.nan # print("\n+++++++ WARNING +++++++ magnetic field is zero - set to nan !!") - if verbose and MPI.COMM_WORLD.Get_rank() == 0: + if verbose: print("\nPLASMA PARAMETERS:") print( - 
"Plasma volume:".ljust(25), + f"Plasma volume:".ljust(25), "{:4.3e}".format(plasma_volume) + units_affix["plasma volume"], ) print( - "Transit length:".ljust(25), + f"Transit length:".ljust(25), "{:4.3e}".format(transit_length) + units_affix["transit length"], ) print( - "Avg. magnetic field:".ljust(25), + f"Avg. magnetic field:".ljust(25), "{:4.3e}".format(magnetic_field) + units_affix["magnetic field"], ) print( - "Max magnetic field:".ljust(25), + f"Max magnetic field:".ljust(25), "{:4.3e}".format(B_max) + units_affix["magnetic field"], ) print( - "Min magnetic field:".ljust(25), + f"Min magnetic field:".ljust(25), "{:4.3e}".format(B_min) + units_affix["magnetic field"], ) - # # species dependent parameters - # self._pparams = {} - - # if len(self.fluid_species) > 0: - # for species, val in self.fluid_species.items(): - # self._pparams[species] = {} - # # type - # self._pparams[species]["type"] = "fluid" - # # mass (kg) - # self._pparams[species]["mass"] = val["params"]["phys_params"]["A"] * m_p - # # charge (C) - # self._pparams[species]["charge"] = val["params"]["phys_params"]["Z"] * e - # # density (m⁻³) - # self._pparams[species]["density"] = ( - # xp.mean( - # self.equil.n0( - # eta1, - # eta2, - # eta3, - # ) - # * xp.abs(det_tmp), - # ) - # * self.units.x ** 3 - # / plasma_volume - # * self.units.n - # ) - # # pressure (bar) - # self._pparams[species]["pressure"] = ( - # xp.mean( - # self.equil.p0( - # eta1, - # eta2, - # eta3, - # ) - # * xp.abs(det_tmp), - # ) - # * self.units.x ** 3 - # / plasma_volume - # * self.units.p - # * 1e-5 - # ) - # # thermal energy (keV) - # self._pparams[species]["kBT"] = self._pparams[species]["pressure"] * 1e5 / self._pparams[species]["density"] / e * 1e-3 - - # if len(self.kinetic) > 0: - # eta1mg, eta2mg, eta3mg = xp.meshgrid( - # eta1, - # eta2, - # eta3, - # indexing="ij", - # ) - - # for species, val in self.kinetic.items(): - # self._pparams[species] = {} - # # type - # self._pparams[species]["type"] = "kinetic" - # # mass (kg) - # self._pparams[species]["mass"] = val["params"]["phys_params"]["A"] * m_p - # # charge (C) - # self._pparams[species]["charge"] = val["params"]["phys_params"]["Z"] * e - - # # create temp kinetic object for (default) parameter extraction - # tmp_bckgr = val["params"]["background"] - - # if val["space"] != "ParticlesSPH": - # tmp = None - # for fi, maxw_params in tmp_bckgr.items(): - # if fi[-2] == "_": - # fi_type = fi[:-2] - # else: - # fi_type = fi - - # if tmp is None: - # tmp = getattr(maxwellians, fi_type)( - # maxw_params=maxw_params, - # equil=self.equil, - # ) - # else: - # tmp = tmp + getattr(maxwellians, fi_type)( - # maxw_params=maxw_params, - # equil=self.equil, - # ) - - # if val["space"] != "ParticlesSPH" and tmp.coords == "constants_of_motion": - # # call parameters - # a1 = self.domain.params_map["a1"] - # r = eta1mg * (1 - a1) + a1 - # psi = self.equil.psi_r(r) - - # # density (m⁻³) - # self._pparams[species]["density"] = ( - # xp.mean(tmp.n(psi) * xp.abs(det_tmp)) * self.units.x ** 3 / plasma_volume * self.units.n - # ) - # # thermal speed (m/s) - # self._pparams[species]["v_th"] = ( - # xp.mean(tmp.vth(psi) * xp.abs(det_tmp)) * self.units.x ** 3 / plasma_volume * self.units.v - # ) - # # thermal energy (keV) - # self._pparams[species]["kBT"] = self._pparams[species]["mass"] * self._pparams[species]["v_th"] ** 2 / e * 1e-3 - # # pressure (bar) - # self._pparams[species]["pressure"] = ( - # self._pparams[species]["kBT"] * e * 1e3 * self._pparams[species]["density"] * 1e-5 - # ) - - # else: - # # 
-        # # species dependent parameters
-        # self._pparams = {}
-
-        # if len(self.fluid_species) > 0:
-        #     for species, val in self.fluid_species.items():
-        #         self._pparams[species] = {}
-        #         # type
-        #         self._pparams[species]["type"] = "fluid"
-        #         # mass (kg)
-        #         self._pparams[species]["mass"] = val["params"]["phys_params"]["A"] * m_p
-        #         # charge (C)
-        #         self._pparams[species]["charge"] = val["params"]["phys_params"]["Z"] * e
-        #         # density (m⁻³)
-        #         self._pparams[species]["density"] = (
-        #             xp.mean(
-        #                 self.equil.n0(
-        #                     eta1,
-        #                     eta2,
-        #                     eta3,
-        #                 )
-        #                 * xp.abs(det_tmp),
-        #             )
-        #             * self.units.x ** 3
-        #             / plasma_volume
-        #             * self.units.n
-        #         )
-        #         # pressure (bar)
-        #         self._pparams[species]["pressure"] = (
-        #             xp.mean(
-        #                 self.equil.p0(
-        #                     eta1,
-        #                     eta2,
-        #                     eta3,
-        #                 )
-        #                 * xp.abs(det_tmp),
-        #             )
-        #             * self.units.x ** 3
-        #             / plasma_volume
-        #             * self.units.p
-        #             * 1e-5
-        #         )
-        #         # thermal energy (keV)
-        #         self._pparams[species]["kBT"] = self._pparams[species]["pressure"] * 1e5 / self._pparams[species]["density"] / e * 1e-3
-
-        # if len(self.kinetic) > 0:
-        #     eta1mg, eta2mg, eta3mg = xp.meshgrid(
-        #         eta1,
-        #         eta2,
-        #         eta3,
-        #         indexing="ij",
-        #     )
-
-        #     for species, val in self.kinetic.items():
-        #         self._pparams[species] = {}
-        #         # type
-        #         self._pparams[species]["type"] = "kinetic"
-        #         # mass (kg)
-        #         self._pparams[species]["mass"] = val["params"]["phys_params"]["A"] * m_p
-        #         # charge (C)
-        #         self._pparams[species]["charge"] = val["params"]["phys_params"]["Z"] * e
-
-        #         # create temp kinetic object for (default) parameter extraction
-        #         tmp_bckgr = val["params"]["background"]
-
-        #         if val["space"] != "ParticlesSPH":
-        #             tmp = None
-        #             for fi, maxw_params in tmp_bckgr.items():
-        #                 if fi[-2] == "_":
-        #                     fi_type = fi[:-2]
-        #                 else:
-        #                     fi_type = fi
-
-        #                 if tmp is None:
-        #                     tmp = getattr(maxwellians, fi_type)(
-        #                         maxw_params=maxw_params,
-        #                         equil=self.equil,
-        #                     )
-        #                 else:
-        #                     tmp = tmp + getattr(maxwellians, fi_type)(
-        #                         maxw_params=maxw_params,
-        #                         equil=self.equil,
-        #                     )
-
-        #         if val["space"] != "ParticlesSPH" and tmp.coords == "constants_of_motion":
-        #             # call parameters
-        #             a1 = self.domain.params_map["a1"]
-        #             r = eta1mg * (1 - a1) + a1
-        #             psi = self.equil.psi_r(r)
-
-        #             # density (m⁻³)
-        #             self._pparams[species]["density"] = (
-        #                 xp.mean(tmp.n(psi) * xp.abs(det_tmp)) * self.units.x ** 3 / plasma_volume * self.units.n
-        #             )
-        #             # thermal speed (m/s)
-        #             self._pparams[species]["v_th"] = (
-        #                 xp.mean(tmp.vth(psi) * xp.abs(det_tmp)) * self.units.x ** 3 / plasma_volume * self.units.v
-        #             )
-        #             # thermal energy (keV)
-        #             self._pparams[species]["kBT"] = self._pparams[species]["mass"] * self._pparams[species]["v_th"] ** 2 / e * 1e-3
-        #             # pressure (bar)
-        #             self._pparams[species]["pressure"] = (
-        #                 self._pparams[species]["kBT"] * e * 1e3 * self._pparams[species]["density"] * 1e-5
-        #             )
-
-        #         else:
-        #             # density (m⁻³)
-        #             # self._pparams[species]['density'] = xp.mean(tmp.n(
-        #             #     eta1mg, eta2mg, eta3mg) * xp.abs(det_tmp)) * units['x']**3 / plasma_volume * units['n']
-        #             self._pparams[species]["density"] = 99.0
-        #             # thermal speeds (m/s)
-        #             vth = []
-        #             # vths = tmp.vth(eta1mg, eta2mg, eta3mg)
-        #             vths = [99.0]
-        #             for k in range(len(vths)):
-        #                 vth += [
-        #                     vths[k] * xp.abs(det_tmp) * self.units.x ** 3 / plasma_volume * self.units.v,
-        #                 ]
-        #             thermal_speed = 0.0
-        #             for dir in range(val["obj"].vdim):
-        #                 # self._pparams[species]['vth' + str(dir + 1)] = xp.mean(vth[dir])
-        #                 self._pparams[species]["vth" + str(dir + 1)] = 99.0
-        #                 thermal_speed += self._pparams[species]["vth" + str(dir + 1)]
-        #             # TODO: here it is assumed that background density parameter is called "n",
-        #             # and that background thermal speeds are called "vthn"; make this a convention?
-        #             # self._pparams[species]['v_th'] = thermal_speed / \
-        #             #     val['obj'].vdim
-        #             self._pparams[species]["v_th"] = 99.0
-        #             # thermal energy (keV)
-        #             # self._pparams[species]['kBT'] = self._pparams[species]['mass'] * \
-        #             #     self._pparams[species]['v_th']**2 / e * 1e-3
-        #             self._pparams[species]["kBT"] = 99.0
-        #             # pressure (bar)
-        #             # self._pparams[species]['pressure'] = self._pparams[species]['kBT'] * \
-        #             #     e * 1e3 * self._pparams[species]['density'] * 1e-5
-        #             self._pparams[species]["pressure"] = 99.0
-
-        # for species in self._pparams:
-        #     # alfvén speed (m/s)
-        #     self._pparams[species]["v_A"] = magnetic_field / xp.sqrt(
-        #         mu0 * self._pparams[species]["mass"] * self._pparams[species]["density"],
-        #     )
-        #     # thermal speed (m/s)
-        #     self._pparams[species]["v_th"] = xp.sqrt(
-        #         self._pparams[species]["kBT"] * 1e3 * e / self._pparams[species]["mass"],
-        #     )
-        #     # thermal frequency (Mrad/s)
-        #     self._pparams[species]["Omega_th"] = self._pparams[species]["v_th"] / transit_length * 1e-6
-        #     # cyclotron frequency (Mrad/s)
-        #     self._pparams[species]["Omega_c"] = self._pparams[species]["charge"] * magnetic_field / self._pparams[species]["mass"] * 1e-6
-        #     # plasma frequency (Mrad/s)
-        #     self._pparams[species]["Omega_p"] = (
-        #         xp.sqrt(
-        #             self._pparams[species]["density"] * (self._pparams[species]["charge"]) ** 2 / eps0 / self._pparams[species]["mass"],
-        #         )
-        #         * 1e-6
-        #     )
-        #     # alfvén frequency (Mrad/s)
-        #     self._pparams[species]["Omega_A"] = self._pparams[species]["v_A"] / transit_length * 1e-6
-        #     # Larmor radius (m)
-        #     self._pparams[species]["rho_th"] = self._pparams[species]["v_th"] / (self._pparams[species]["Omega_c"] * 1e6)
-        #     # MHD length scale (m)
-        #     self._pparams[species]["v_A/Omega_c"] = self._pparams[species]["v_A"] / (xp.abs(self._pparams[species]["Omega_c"]) * 1e6)
-        #     # dim-less ratios
-        #     self._pparams[species]["rho_th/L"] = self._pparams[species]["rho_th"] / transit_length
-
-        # if verbose and self.rank_world == 0:
-        #     print("\nSPECIES PARAMETERS:")
-        #     for species, ch in self._pparams.items():
-        #         print(f"\nname:".ljust(26), species)
-        #         print(f"type:".ljust(25), ch["type"])
-        #         ch.pop("type")
-        #         print(f"is bulk:".ljust(25), species == self.bulk_species())
-        #         for kinds, vals in ch.items():
-        #             print(
-        #                 kinds.ljust(25),
-        #                 "{:+4.3e}".format(
-        #                     vals,
-        #                 ),
-        #                 units_affix[kinds],
-        #             )
+        # species dependent parameters
+        pparams = {}
+
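+        # Illustrative layout of the dictionary filled below (values are
+        # placeholders, not computed output):
+        #
+        #   pparams = {
+        #       "mhd": {"type": "fluid", "mass": 1.67e-27, "charge": 1.6e-19,
+        #               "density": 1e19, "pressure": 1e-2, "kBT": 1.0},
+        #   }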
val["params"]["phys_params"]["Z"] * e + # density (m⁻³) + pparams[species]["density"] = ( + np.mean( + self.equil.n0( + eta1, + eta2, + eta3, + ) + * np.abs(det_tmp), + ) + * units["x"] ** 3 + / plasma_volume + * units["n"] + ) + # pressure (bar) + pparams[species]["pressure"] = ( + np.mean( + self.equil.p0( + eta1, + eta2, + eta3, + ) + * np.abs(det_tmp), + ) + * units["x"] ** 3 + / plasma_volume + * units["p"] + * 1e-5 + ) + # thermal energy (keV) + pparams[species]["kBT"] = pparams[species]["pressure"] * 1e5 / pparams[species]["density"] / e * 1e-3 + + if len(self.kinetic) > 0: + eta1mg, eta2mg, eta3mg = np.meshgrid( + eta1, + eta2, + eta3, + indexing="ij", + ) + + for species, val in self.kinetic.items(): + pparams[species] = {} + # type + pparams[species]["type"] = "kinetic" + # mass (kg) + pparams[species]["mass"] = val["params"]["phys_params"]["A"] * m_p + # charge (C) + pparams[species]["charge"] = val["params"]["phys_params"]["Z"] * e + + # create temp kinetic object for (default) parameter extraction + tmp_bckgr = val["params"]["background"] + + if val["space"] != "ParticlesSPH": + tmp = None + for fi, maxw_params in tmp_bckgr.items(): + if fi[-2] == "_": + fi_type = fi[:-2] + else: + fi_type = fi + + if tmp is None: + tmp = getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + equil=self.equil, + ) + else: + tmp = tmp + getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + equil=self.equil, + ) + + if val["space"] != "ParticlesSPH" and tmp.coords == "constants_of_motion": + # call parameters + a1 = self.domain.params["a1"] + r = eta1mg * (1 - a1) + a1 + psi = self.equil.psi_r(r) + + # density (m⁻³) + pparams[species]["density"] = ( + np.mean(tmp.n(psi) * np.abs(det_tmp)) * units["x"] ** 3 / plasma_volume * units["n"] + ) + # thermal speed (m/s) + pparams[species]["v_th"] = ( + np.mean(tmp.vth(psi) * np.abs(det_tmp)) * units["x"] ** 3 / plasma_volume * units["v"] + ) + # thermal energy (keV) + pparams[species]["kBT"] = pparams[species]["mass"] * pparams[species]["v_th"] ** 2 / e * 1e-3 + # pressure (bar) + pparams[species]["pressure"] = ( + pparams[species]["kBT"] * e * 1e3 * pparams[species]["density"] * 1e-5 + ) + + else: + # density (m⁻³) + # pparams[species]['density'] = np.mean(tmp.n( + # eta1mg, eta2mg, eta3mg) * np.abs(det_tmp)) * units['x']**3 / plasma_volume * units['n'] + pparams[species]["density"] = 99.0 + # thermal speeds (m/s) + vth = [] + # vths = tmp.vth(eta1mg, eta2mg, eta3mg) + vths = [99.0] + for k in range(len(vths)): + vth += [ + vths[k] * np.abs(det_tmp) * units["x"] ** 3 / plasma_volume * units["v"], + ] + thermal_speed = 0.0 + for dir in range(val["obj"].vdim): + # pparams[species]['vth' + str(dir + 1)] = np.mean(vth[dir]) + pparams[species]["vth" + str(dir + 1)] = 99.0 + thermal_speed += pparams[species]["vth" + str(dir + 1)] + # TODO: here it is assumed that background density parameter is called "n", + # and that background thermal speeds are called "vthn"; make this a convention? 
+                    # pparams[species]['v_th'] = thermal_speed / \
+                    #     val['obj'].vdim
+                    pparams[species]["v_th"] = 99.0
+                    # thermal energy (keV)
+                    # pparams[species]['kBT'] = pparams[species]['mass'] * \
+                    #     pparams[species]['v_th']**2 / e * 1e-3
+                    pparams[species]["kBT"] = 99.0
+                    # pressure (bar)
+                    # pparams[species]['pressure'] = pparams[species]['kBT'] * \
+                    #     e * 1e3 * pparams[species]['density'] * 1e-5
+                    pparams[species]["pressure"] = 99.0
+
+        for species in pparams:
+            # alfvén speed (m/s)
+            pparams[species]["v_A"] = magnetic_field / np.sqrt(
+                mu0 * pparams[species]["mass"] * pparams[species]["density"],
+            )
+            # thermal speed (m/s)
+            pparams[species]["v_th"] = np.sqrt(
+                pparams[species]["kBT"] * 1e3 * e / pparams[species]["mass"],
+            )
+            # thermal frequency (Mrad/s)
+            pparams[species]["Omega_th"] = pparams[species]["v_th"] / transit_length * 1e-6
+            # cyclotron frequency (Mrad/s)
+            pparams[species]["Omega_c"] = pparams[species]["charge"] * magnetic_field / pparams[species]["mass"] * 1e-6
+            # plasma frequency (Mrad/s)
+            pparams[species]["Omega_p"] = (
+                np.sqrt(
+                    pparams[species]["density"] * (pparams[species]["charge"]) ** 2 / eps0 / pparams[species]["mass"],
+                )
+                * 1e-6
+            )
+            # alfvén frequency (Mrad/s)
+            pparams[species]["Omega_A"] = pparams[species]["v_A"] / transit_length * 1e-6
+            # Larmor radius (m)
+            pparams[species]["rho_th"] = pparams[species]["v_th"] / (pparams[species]["Omega_c"] * 1e6)
+            # MHD length scale (m)
+            pparams[species]["v_A/Omega_c"] = pparams[species]["v_A"] / (np.abs(pparams[species]["Omega_c"]) * 1e6)
+            # dim-less ratios
+            pparams[species]["rho_th/L"] = pparams[species]["rho_th"] / transit_length
+
+        if verbose:
+            print("\nSPECIES PARAMETERS:")
+            for species, ch in pparams.items():
+                print("\nname:".ljust(26), species)
+                print("type:".ljust(25), ch["type"])
+                print("is bulk:".ljust(25), species == self.bulk_species())
+                for kinds, vals in ch.items():
+                    # skip the "type" entry instead of popping it, so the
+                    # returned dict is not mutated by verbose printing
+                    if kinds == "type":
+                        continue
+                    print(
+                        kinds.ljust(25),
+                        "{:+4.3e}".format(
+                            vals,
+                        ),
+                        units_affix[kinds],
+                    )
+
+        return pparams


 class MyDumper(yaml.SafeDumper):
diff --git a/src/struphy/models/fluid.py b/src/struphy/models/fluid.py
index 405610b7b..04f5fca1c 100644
--- a/src/struphy/models/fluid.py
+++ b/src/struphy/models/fluid.py
@@ -1,17 +1,6 @@
-import cunumpy as xp
-from psydac.ddm.mpi import mpi as MPI
-from psydac.linalg.block import BlockVector
-from psydac.linalg.stencil import StencilVector
-
-from struphy.feec.projectors import L2Projector
-from struphy.feec.variational_utilities import H1vecMassMatrix_density, InternalEnergyEvaluator
 from struphy.models.base import StruphyModel
-from struphy.models.species import DiagnosticSpecies, FieldSpecies, FluidSpecies, ParticleSpecies
-from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable
-from struphy.polar.basic import PolarVector
 from struphy.propagators import propagators_coupling, propagators_fields, propagators_markers
-
-rank = MPI.COMM_WORLD.Get_rank()
+from struphy.utils.arrays import xp as np


 class LinearMHD(StruphyModel):
@@ -42,51 +31,91 @@ class LinearMHD(StruphyModel):
    1. :class:`~struphy.propagators.propagators_fields.ShearAlfven`
    2. 
:class:`~struphy.propagators.propagators_fields.Magnetosonic` + + :ref:`Model info `: """ - ## species + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() + dct["em_fields"]["b_field"] = "Hdiv" + dct["fluid"]["mhd"] = {"density": "L2", "velocity": "Hdiv", "pressure": "L2"} + return dct - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="Hdiv") - self.pressure = FEECVariable(space="L2") - self.init_variables() + @staticmethod + def bulk_species(): + return "mhd" - ## propagators + @staticmethod + def velocity_scale(): + return "alfvén" - class Propagators: - def __init__(self): - self.shear_alf = propagators_fields.ShearAlfven() - self.mag_sonic = propagators_fields.Magnetosonic() + @staticmethod + def propagators_dct(): + return { + propagators_fields.ShearAlfven: ["mhd_velocity", "b_field"], + propagators_fields.Magnetosonic: ["mhd_density", "mhd_velocity", "mhd_pressure"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + # add special options + @classmethod + def options(cls): + dct = super().options() + cls.add_option( + species=["fluid", "mhd"], + key="u_space", + option="Hdiv", + dct=dct, + ) + return dct - ## abstract methods + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + from struphy.polar.basic import PolarVector - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() + # extract necessary parameters + u_space = params["fluid"]["mhd"]["options"]["u_space"] + alfven_solver = params["fluid"]["mhd"]["options"]["ShearAlfven"]["solver"] + alfven_algo = params["fluid"]["mhd"]["options"]["ShearAlfven"]["algo"] + sonic_solver = params["fluid"]["mhd"]["options"]["Magnetosonic"]["solver"] - # 2. instantiate all propagators - self.propagators = self.Propagators() + # project background magnetic field (2-form) and pressure (3-form) + self._b_eq = self.projected_equil.b2 + self._p_eq = self.projected_equil.p3 + self._ones = self._p_eq.space.zeros() - # 3. 
assign variables to propagators - self.propagators.shear_alf.variables.u = self.mhd.velocity - self.propagators.shear_alf.variables.b = self.em_fields.b_field + if isinstance(self._ones, PolarVector): + self._ones.tp[:] = 1.0 + else: + self._ones[:] = 1.0 + + # set keyword arguments for propagators + self._kwargs[propagators_fields.ShearAlfven] = { + "u_space": u_space, + "solver": alfven_solver, + "algo": alfven_algo, + } + + self._kwargs[propagators_fields.Magnetosonic] = { + "b": self.pointer["b_field"], + "u_space": u_space, + "solver": sonic_solver, + } - self.propagators.mag_sonic.variables.n = self.mhd.density - self.propagators.mag_sonic.variables.u = self.mhd.velocity - self.propagators.mag_sonic.variables.p = self.mhd.pressure + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_p") self.add_scalar("en_B") @@ -95,35 +124,15 @@ def __init__(self): self.add_scalar("en_B_tot") self.add_scalar("en_tot") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - self._ones = self.projected_equil.p3.space.zeros() - if isinstance(self._ones, PolarVector): - self._ones.tp[:] = 1.0 - else: - self._ones[:] = 1.0 - - self._tmp_b1: BlockVector = self.derham.Vh["2"].zeros() # TODO: replace derham.Vh dict by class - self._tmp_b2: BlockVector = self.derham.Vh["2"].zeros() + # vectors for computing scalar quantities + self._tmp_b1 = self.derham.Vh["2"].zeros() + self._tmp_b2 = self.derham.Vh["2"].zeros() def update_scalar_quantities(self): # perturbed fields - en_U = 0.5 * self.mass_ops.M2n.dot_inner( - self.mhd.velocity.spline.vector, - self.mhd.velocity.spline.vector, - ) - en_B = 0.5 * self.mass_ops.M2.dot_inner( - self.em_fields.b_field.spline.vector, - self.em_fields.b_field.spline.vector, - ) - en_p = self.mhd.pressure.spline.vector.inner(self._ones) / (5 / 3 - 1) + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_velocity"], self.pointer["mhd_velocity"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) + en_p = self.pointer["mhd_pressure"].inner(self._ones) / (5 / 3 - 1) self.update_scalar("en_U", en_U) self.update_scalar("en_B", en_B) @@ -131,17 +140,17 @@ def update_scalar_quantities(self): self.update_scalar("en_tot", en_U + en_B + en_p) # background fields - self.mass_ops.M2.dot(self.projected_equil.b2, apply_bc=False, out=self._tmp_b1) + self.mass_ops.M2.dot(self._b_eq, apply_bc=False, out=self._tmp_b1) - en_B0 = self.projected_equil.b2.inner(self._tmp_b1) / 2 - en_p0 = self.projected_equil.p3.inner(self._ones) / (5 / 3 - 1) + en_B0 = self._b_eq.inner(self._tmp_b1) / 2 + en_p0 = self._p_eq.inner(self._ones) / (5 / 3 - 1) self.update_scalar("en_B_eq", en_B0) self.update_scalar("en_p_eq", en_p0) # total magnetic field - self.projected_equil.b2.copy(out=self._tmp_b1) - self._tmp_b1 += self.em_fields.b_field.spline.vector + self._b_eq.copy(out=self._tmp_b1) + self._tmp_b1 += self.pointer["b_field"] self.mass_ops.M2.dot(self._tmp_b1, apply_bc=False, out=self._tmp_b2) @@ -149,23 +158,6 @@ def update_scalar_quantities(self): self.update_scalar("en_B_tot", en_Btot) - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") 
as f: - for line in f: - if "mag_sonic.Options" in line: - new_file += [ - "model.propagators.mag_sonic.options = model.propagators.mag_sonic.Options(b_field=model.em_fields.b_field)\n", - ] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class LinearExtendedMHDuniform(StruphyModel): r"""Linear extended MHD with zero-flow equilibrium (:math:`\mathbf U_0 = 0`). @@ -206,52 +198,87 @@ class LinearExtendedMHDuniform(StruphyModel): :ref:`Model info `: """ - ## species + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hcurl") - self.init_variables() + dct["em_fields"]["b_field"] = "Hcurl" + dct["fluid"]["mhd"] = { + "rho": "L2", + "u": "Hdiv", + "p": "L2", + } + return dct - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="Hdiv") - self.pressure = FEECVariable(space="L2") - self.init_variables() + @staticmethod + def bulk_species(): + return "mhd" - ## propagators + @staticmethod + def velocity_scale(): + return "alfvén" - class Propagators: - def __init__(self): - self.shear_alf = propagators_fields.ShearAlfvenB1() - self.hall = propagators_fields.Hall() - self.mag_sonic = propagators_fields.MagnetosonicUniform() + @staticmethod + def propagators_dct(): + return { + propagators_fields.ShearAlfvenB1: ["mhd_u", "b_field"], + propagators_fields.Hall: ["b_field"], + propagators_fields.MagnetosonicUniform: ["mhd_rho", "mhd_u", "mhd_p"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + from struphy.polar.basic import PolarVector + + # extract necessary parameters + alfven_solver = params["fluid"]["mhd"]["options"]["ShearAlfvenB1"]["solver"] + M1_inv = params["fluid"]["mhd"]["options"]["ShearAlfvenB1"]["solver_M1"] + hall_solver = params["em_fields"]["options"]["Hall"]["solver"] + sonic_solver = params["fluid"]["mhd"]["options"]["MagnetosonicUniform"]["solver"] + + # project background magnetic field (1-form) and pressure (3-form) + self._b_eq = self.projected_equil.b1 + self._a_eq = self.projected_equil.a1 + self._p_eq = self.projected_equil.p3 + self._ones = self.pointer["mhd_p"].space.zeros() - ## abstract methods + if isinstance(self._ones, PolarVector): + self._ones.tp[:] = 1.0 + else: + self._ones[:] = 1.0 - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + # compute coupling parameters + epsilon = self.equation_params["mhd"]["epsilon"] - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() + if abs(epsilon - 1) < 1e-6: + epsilon = 1.0 - # 2. instantiate all propagators - self.propagators = self.Propagators() + # set keyword arguments for propagators + self._kwargs[propagators_fields.ShearAlfvenB1] = { + "solver": alfven_solver, + "solver_M1": M1_inv, + } - # 3. 
assign variables to propagators - self.propagators.shear_alf.variables.u = self.mhd.velocity - self.propagators.shear_alf.variables.b = self.em_fields.b_field + self._kwargs[propagators_fields.Hall] = { + "solver": hall_solver, + "epsilon": epsilon, + } - self.propagators.hall.variables.b = self.em_fields.b_field + self._kwargs[propagators_fields.MagnetosonicUniform] = {"solver": sonic_solver} - self.propagators.mag_sonic.variables.n = self.mhd.density - self.propagators.mag_sonic.variables.u = self.mhd.velocity - self.propagators.mag_sonic.variables.p = self.mhd.pressure + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_p") self.add_scalar("en_B") @@ -261,45 +288,17 @@ def __init__(self): self.add_scalar("en_tot") self.add_scalar("helicity") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - self._b_eq = self.projected_equil.b1 - self._a_eq = self.projected_equil.a1 - self._p_eq = self.projected_equil.p3 - - self._ones = self.projected_equil.p3.space.zeros() - if isinstance(self._ones, PolarVector): - self._ones.tp[:] = 1.0 - else: - self._ones[:] = 1.0 - - self._tmp_b1: BlockVector = self.derham.Vh["1"].zeros() # TODO: replace derham.Vh dict by class - self._tmp_b2: BlockVector = self.derham.Vh["1"].zeros() - - # adjust coupling parameters - epsilon = self.mhd.equation_params.epsilon - - if abs(epsilon - 1) < 1e-6: - self.mhd.equation_params.epsilon = 1.0 + # temporary vectors for scalar quantities + self._tmp_b1 = self.derham.Vh["1"].zeros() + self._tmp_b2 = self.derham.Vh["1"].zeros() def update_scalar_quantities(self): # perturbed fields - u = self.mhd.velocity.spline.vector - p = self.mhd.pressure.spline.vector - b = self.em_fields.b_field.spline.vector - - en_U = 0.5 * self.mass_ops.M2n.dot_inner(u, u) - b1 = self.mass_ops.M1.dot(b, out=self._tmp_b1) - en_B = 0.5 * b.inner(b1) + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_u"], self.pointer["mhd_u"]) + b1 = self.mass_ops.M1.dot(self.pointer["b_field"], out=self._tmp_b1) + en_B = 0.5 * self.pointer["b_field"].inner(b1) helicity = 2.0 * self._a_eq.inner(b1) - en_p_i = p.inner(self._ones) / (5.0 / 3.0 - 1.0) + en_p_i = self.pointer["mhd_p"].inner(self._ones) / (5.0 / 3.0 - 1.0) self.update_scalar("en_U", en_U) self.update_scalar("en_B", en_B) @@ -317,30 +316,13 @@ def update_scalar_quantities(self): # total magnetic field b1 = self._b_eq.copy(out=self._tmp_b1) - self._tmp_b1 += b + self._tmp_b1 += self.pointer["b_field"] b2 = self.mass_ops.M1.dot(b1, apply_bc=False, out=self._tmp_b2) en_Btot = b1.inner(b2) / 2.0 self.update_scalar("en_B_tot", en_Btot) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "hall.Options" in line: - new_file += [ - "model.propagators.hall.options = model.propagators.hall.Options(epsilon_from=model.mhd)\n", - ] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class ColdPlasma(StruphyModel): r"""Cold plasma model. 
@@ -377,74 +359,82 @@ class ColdPlasma(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.e_field = FEECVariable(space="Hcurl") - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class Electrons(FluidSpecies): - def __init__(self): - self.current = FEECVariable(space="Hcurl") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.maxwell = propagators_fields.Maxwell() - self.ohm = propagators_fields.OhmCold() - self.jxb = propagators_fields.JxBCold() - - ## abstract methods + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + dct["em_fields"]["e_field"] = "Hcurl" + dct["em_fields"]["b_field"] = "Hdiv" + dct["fluid"]["electrons"] = {"j": "Hcurl"} + return dct - # 1. instantiate all species - self.em_fields = self.EMFields() - self.electrons = self.Electrons() + @staticmethod + def bulk_species(): + return "electrons" - # 2. instantiate all propagators - self.propagators = self.Propagators() - - # 3. assign variables to propagators - self.propagators.maxwell.variables.e = self.em_fields.e_field - self.propagators.maxwell.variables.b = self.em_fields.b_field - - self.propagators.ohm.variables.j = self.electrons.current - self.propagators.ohm.variables.e = self.em_fields.e_field - - self.propagators.jxb.variables.j = self.electrons.current + @staticmethod + def velocity_scale(): + return "light" - # define scalars for update_scalar_quantities + @staticmethod + def propagators_dct(): + return { + propagators_fields.Maxwell: ["e_field", "b_field"], + propagators_fields.OhmCold: ["electrons_j", "e_field"], + propagators_fields.JxBCold: ["electrons_j"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + # model parameters + self._alpha = self.equation_params["electrons"]["alpha"] + self._epsilon = self.equation_params["electrons"]["epsilon"] + + # solver parameters + params_maxwell = params["em_fields"]["options"]["Maxwell"]["solver"] + params_ohmcold = params["fluid"]["electrons"]["options"]["OhmCold"]["solver"] + params_jxbcold = params["fluid"]["electrons"]["options"]["JxBCold"]["solver"] + + # set keyword arguments for propagators + self._kwargs[propagators_fields.Maxwell] = {"solver": params_maxwell} + + self._kwargs[propagators_fields.OhmCold] = { + "alpha": self._alpha, + "epsilon": self._epsilon, + "solver": params_ohmcold, + } + + self._kwargs[propagators_fields.JxBCold] = { + "epsilon": self._epsilon, + "solver": params_jxbcold, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("electric energy") self.add_scalar("magnetic energy") self.add_scalar("kinetic energy") self.add_scalar("total energy") - @property - def bulk_species(self): - return self.electrons - - @property - def velocity_scale(self): - return "light" - - def allocate_helpers(self): - self._alpha = self.electrons.equation_params.alpha - def update_scalar_quantities(self): 
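        # All energies below are quadratic forms in the FE coefficient vectors,
        # en = 0.5 * x^T M x, evaluated matrix-free via M.dot_inner(x, x).
        # A dense NumPy analogue (M and x are hypothetical stand-ins for a
        # mass matrix and a coefficient vector, not the model API) would be:
        #
        #   en = 0.5 * x @ (M @ x)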
- e = self.em_fields.e_field.spline.vector - b = self.em_fields.b_field.spline.vector - j = self.electrons.current.spline.vector - - en_E = 0.5 * self.mass_ops.M1.dot_inner(e, e) - en_B = 0.5 * self.mass_ops.M2.dot_inner(b, b) - en_J = 0.5 * self._alpha**2 * self.mass_ops.M1ninv.dot_inner(j, j) + en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) + en_J = ( + 0.5 + * self._alpha**2 + * self.mass_ops.M1ninv.dot_inner(self.pointer["electrons_j"], self.pointer["electrons_j"]) + ) self.update_scalar("electric energy", en_E) self.update_scalar("magnetic energy", en_B) @@ -452,7 +442,7 @@ def update_scalar_quantities(self): self.update_scalar("total energy", en_E + en_B + en_J) -class ViscoResistiveMHD(StruphyModel): +class ViscoresistiveMHD(StruphyModel): r"""Full (non-linear) visco-resistive MHD equations discretized with a variational method. :ref:`normalization`: @@ -488,73 +478,139 @@ class ViscoResistiveMHD(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.entropy = FEECVariable(space="L2") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_ent = propagators_fields.VariationalEntropyEvolve() - self.variat_mag = propagators_fields.VariationalMagFieldEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "s3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_mom.variables.u = self.mhd.velocity - self.propagators.variat_ent.variables.s = self.mhd.entropy - self.propagators.variat_ent.variables.u = self.mhd.velocity - self.propagators.variat_mag.variables.u = self.mhd.velocity - self.propagators.variat_mag.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.entropy - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.entropy - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalMomentumAdvection: ["mhd_uv"], + propagators_fields.VariationalEntropyEvolve: ["mhd_s3", "mhd_uv"], + propagators_fields.VariationalMagFieldEvolve: ["b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_s3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_s3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density, InternalEnergyEvaluator + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_entropy = params["fluid"]["mhd"]["options"]["VariationalEntropyEvolve"]["lin_solver"] + nonlin_solver_entropy = params["fluid"]["mhd"]["options"]["VariationalEntropyEvolve"]["nonlin_solver"] + lin_solver_magfield = params["em_fields"]["options"]["VariationalMagFieldEvolve"]["lin_solver"] + nonlin_solver_magfield = params["em_fields"]["options"]["VariationalMagFieldEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + 
self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "full" + + self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "s": self.pointer["mhd_s3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalEntropyEvolve] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_entropy, + "nonlin_solver": nonlin_solver_entropy, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalMagFieldEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + "energy_evaluator": self._energy_evaluator, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_mag") @@ -563,24 +619,16 @@ def __init__( self.add_scalar("entr_tot") self.add_scalar("tot_div_B") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) def f(e1, e2, e3): return 1 - f = xp.vectorize(f) - self._integrator = projV3(f) - - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self.propagators.variat_ent.options.gamma) + f = np.vectorize(f) + self._integrator = projV3(f, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -588,18 +636,12 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = 
self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - s = self.mhd.entropy.spline.vector - b = self.em_fields.b_field.spline.vector - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) self.update_scalar("en_U", en_U) - en_mag = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag", en_mag) en_thermo = self.update_thermo_energy() @@ -607,12 +649,12 @@ def update_scalar_quantities(self): en_tot = en_U + en_thermo + en_mag self.update_scalar("en_tot", en_tot) - dens_tot = self._ones.inner(rho) + dens_tot = self._ones.inner(self.pointer["mhd_rho3"]) self.update_scalar("dens_tot", dens_tot) - entr_tot = self._ones.inner(s) + entr_tot = self._ones.inner(self.pointer["mhd_s3"]) self.update_scalar("entr_tot", entr_tot) - div_B = self.derham.div.dot(b, out=self._tmp_div_B) + div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) self.update_scalar("tot_div_B", L2_div_B) @@ -621,12 +663,9 @@ def update_thermo_energy(self): :meta private: """ - rho = self.mhd.density.spline.vector - s = self.mhd.entropy.spline.vector - en_prop = self.propagators.variat_dens - - self._energy_evaluator.sf.vector = s - self._energy_evaluator.rhof.vector = rho + en_prop = self._propagators[0] + self._energy_evaluator.sf.vector = self.pointer["mhd_s3"] + self._energy_evaluator.rhof.vector = self.pointer["mhd_rho3"] sf_values = self._energy_evaluator.sf.eval_tp_fixed_loc( self._energy_evaluator.integration_grid_spans, self._energy_evaluator.integration_grid_bd, @@ -644,44 +683,6 @@ def update_thermo_energy(self): self.update_scalar("en_thermo", en_thermo) return en_thermo - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='full',\n", - ] - new_file += [ - " s=model.mhd.entropy)\n", - ] - elif "variat_ent.Options" in line: - new_file += [ - "model.propagators.variat_ent.options = model.propagators.variat_ent.Options(model='full',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(rho=model.mhd.density)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(rho=model.mhd.density)\n", - ] - elif "entropy.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class ViscousFluid(StruphyModel): r"""Full (non-linear) viscous Navier-Stokes equations discretized with a variational method. 
@@ -715,72 +716,122 @@ class ViscousFluid(StruphyModel): :ref:`Model info `: """ - ## species - - class Fluid(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.entropy = FEECVariable(space="L2") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self, with_viscosity: bool = True): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_ent = propagators_fields.VariationalEntropyEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - - ## abstract methods + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["fluid"]["fluid"] = {"rho3": "L2", "s3": "L2", "uv": "H1vec"} + return dct - def __init__(self, with_viscosity: bool = True): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def bulk_species(): + return "fluid" - # 1. instantiate all species - self.fluid = self.Fluid() - - # 2. instantiate all propagators - self.propagators = self.Propagators(with_viscosity=with_viscosity) - - # 3. assign variables to propagators - self.propagators.variat_dens.variables.rho = self.fluid.density - self.propagators.variat_dens.variables.u = self.fluid.velocity - self.propagators.variat_mom.variables.u = self.fluid.velocity - self.propagators.variat_ent.variables.s = self.fluid.entropy - self.propagators.variat_ent.variables.u = self.fluid.velocity - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.fluid.entropy - self.propagators.variat_viscous.variables.u = self.fluid.velocity + @staticmethod + def velocity_scale(): + return "alfvén" - # define scalars for update_scalar_quantities + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["fluid_rho3", "fluid_uv"], + propagators_fields.VariationalMomentumAdvection: ["fluid_uv"], + propagators_fields.VariationalEntropyEvolve: ["fluid_s3", "fluid_uv"], + propagators_fields.VariationalViscosity: ["fluid_s3", "fluid_uv"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + from struphy.feec.variational_utilities import H1vecMassMatrix_density, InternalEnergyEvaluator + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_entropy = params["fluid"]["fluid"]["options"]["VariationalEntropyEvolve"]["lin_solver"] + nonlin_solver_entropy = 
params["fluid"]["fluid"]["options"]["VariationalEntropyEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["fluid"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["fluid"]["options"]["VariationalViscosity"]["nonlin_solver"] + + self._gamma = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["fluid"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["fluid"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + model = "full" + + self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "s": self.pointer["fluid_s3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalEntropyEvolve] = { + "model": model, + "rho": self.pointer["fluid_rho3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_entropy, + "nonlin_solver": nonlin_solver_entropy, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "gamma": self._gamma, + "rho": self.pointer["fluid_rho3"], + "mu": self._mu, + "mu_a": self._mu_a, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + "energy_evaluator": self._energy_evaluator, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_tot") self.add_scalar("dens_tot") self.add_scalar("entr_tot") - @property - def bulk_species(self): - return self.fluid - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) + # temporary vectors for scalar quantities + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) def f(e1, e2, e3): return 1 - f = xp.vectorize(f) - self._integrator = projV3(f) - - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self.propagators.variat_ent.options.gamma) + f = np.vectorize(f) + self._integrator = projV3(f, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -789,11 +840,8 @@ def f(e1, e2, e3): self._ones[:] = 1.0 def update_scalar_quantities(self): - rho = self.fluid.density.spline.vector - u = self.fluid.velocity.spline.vector - s = self.fluid.entropy.spline.vector - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["fluid_uv"], self.pointer["fluid_uv"]) self.update_scalar("en_U", en_U) en_thermo = self.update_thermo_energy() @@ -801,9 +849,9 @@ def update_scalar_quantities(self): en_tot = en_U + en_thermo self.update_scalar("en_tot", en_tot) - dens_tot = self._ones.inner(rho) + dens_tot = self._ones.inner(self.pointer["fluid_rho3"]) self.update_scalar("dens_tot", dens_tot) - entr_tot = self._ones.inner(s) + entr_tot = 
self._ones.inner(self.pointer["fluid_s3"]) self.update_scalar("entr_tot", entr_tot) def update_thermo_energy(self): @@ -811,12 +859,9 @@ def update_thermo_energy(self): :meta private: """ - rho = self.fluid.density.spline.vector - s = self.fluid.entropy.spline.vector - en_prop = self.propagators.variat_dens - - self._energy_evaluator.sf.vector = s - self._energy_evaluator.rhof.vector = rho + en_prop = self._propagators[0] + self._energy_evaluator.sf.vector = self.pointer["fluid_s3"] + self._energy_evaluator.rhof.vector = self.pointer["fluid_rho3"] sf_values = self._energy_evaluator.sf.eval_tp_fixed_loc( self._energy_evaluator.integration_grid_spans, self._energy_evaluator.integration_grid_bd, @@ -834,42 +879,8 @@ def update_thermo_energy(self): self.update_scalar("en_thermo", en_thermo) return en_thermo - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='full',\n", - ] - new_file += [ - " s=model.fluid.entropy)\n", - ] - elif "variat_ent.Options" in line: - new_file += [ - "model.propagators.variat_ent.options = model.propagators.variat_ent.Options(model='full',\n", - ] - new_file += [ - " rho=model.fluid.density)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(rho=model.fluid.density)\n", - ] - elif "entropy.add_background" in line: - new_file += ["model.fluid.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveMHD_with_p(StruphyModel): + +class ViscoresistiveMHD_with_p(StruphyModel): r"""Full (non-linear) visco-resistive MHD equations, with the pressure variable discretized with a variational method. :ref:`normalization`: @@ -903,78 +914,121 @@ class ViscoResistiveMHD_with_p(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.pressure = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_pb = propagators_fields.VariationalPBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. 
instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "p3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" + + @staticmethod + def velocity_scale(): + return "alfvén" - # 3. assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_mom.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.p = self.mhd.pressure - self.propagators.variat_pb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.pressure - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.pressure - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalMomentumAdvection: ["mhd_uv"], + propagators_fields.VariationalPBEvolve: ["mhd_p3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_p3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_p3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in 
params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "full_p" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalPBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_mag") @@ -982,22 +1036,12 @@ def __init__( self.add_scalar("dens_tot") self.add_scalar("tot_div_B") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -1005,68 +1049,39 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - p = self.mhd.pressure.spline.vector - b = self.em_fields.b_field.spline.vector - - gamma = self.propagators.variat_pb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) 
self.update_scalar("en_U", en_U) - en_mag = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag", en_mag) - en_thermo = self.mass_ops.M3.dot_inner(p, self._integrator) / (gamma - 1.0) + en_thermo = self.mass_ops.M3.dot_inner(self.pointer["mhd_p3"], self._integrator) / (self._gamma - 1.0) self.update_scalar("en_thermo", en_thermo) en_tot = en_U + en_thermo + en_mag self.update_scalar("en_tot", en_tot) - dens_tot = self._ones.inner(rho) + dens_tot = self._ones.inner(self.pointer["mhd_rho3"]) self.update_scalar("dens_tot", dens_tot) - div_B = self.derham.div.dot(b, out=self._tmp_div_B) + div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) self.update_scalar("tot_div_B", L2_div_B) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_pb.Options" in line: - new_file += [ - "model.propagators.variat_pb.options = model.propagators.variat_pb.Options(div_u=model.diagnostics.div_u,\n", - ] - new_file += [ - " u2=model.diagnostics.u2)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(rho=model.mhd.density)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(rho=model.mhd.density)\n", - ] - elif "pressure.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveLinearMHD(StruphyModel): + @staticmethod + def diagnostics_dct(): + dct = {} + + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class ViscoresistiveLinearMHD(StruphyModel): r"""Linear visco-resistive MHD equations discretized with a variational method. 
:ref:`normalization`: @@ -1099,104 +1114,136 @@ class ViscoResistiveLinearMHD(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.pressure = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.pt3 = FEECVariable(space="L2") - self.bt2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_pb = propagators_fields.VariationalPBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "p3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" + + @staticmethod + def velocity_scale(): + return "alfvén" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.p = self.mhd.pressure - self.propagators.variat_pb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.pressure - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.pressure - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalPBEvolve: ["mhd_p3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_p3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_p3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "linear" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, 
+ "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalPBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + "div_u": self.pointer["div_u"], + "u2": self.pointer["u2"], + "bt2": self.pointer["bt2"], + "pt3": self.pointer["pt3"], + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": "linear_p", + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": "linear_p", + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + "pt3": self.pointer["pt3"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_mag_1") self.add_scalar("en_mag_2") self.add_scalar("en_tot") + # self.add_scalar("dens_tot") + # self.add_scalar("tot_div_B") + self.add_scalar("en_tot_l1") self.add_scalar("en_thermo_l1") self.add_scalar("en_mag_l1") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -1204,104 +1251,52 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - p = self.mhd.pressure.spline.vector - b = self.em_fields.b_field.spline.vector - bt2 = self.propagators.variat_pb.options.bt2.spline.vector - pt3 = self.propagators.variat_pb.options.pt3.spline.vector - - gamma = self.propagators.variat_pb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) self.update_scalar("en_U", en_U) - en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag_1", en_mag1) - en_mag2 = self.mass_ops.M2.dot_inner(bt2, self.projected_equil.b2) + en_mag2 = self.mass_ops.M2.dot_inner(self.pointer["bt2"], self.projected_equil.b2) self.update_scalar("en_mag_2", en_mag2) - en_thermo = self.mass_ops.M3.dot_inner(pt3, self._integrator) / (gamma - 1.0) + en_thermo = self.mass_ops.M3.dot_inner(self.pointer["pt3"], self._integrator) / (self._gamma - 1.0) self.update_scalar("en_thermo", en_thermo) en_tot = en_U + en_thermo + en_mag1 + en_mag2 self.update_scalar("en_tot", en_tot) - # dens_tot = 
self._ones.inner(rho) + # dens_tot = self._ones.inner(self.pointer["mhd_rho3"]) # self.update_scalar("dens_tot", dens_tot) - # div_B = self.derham.div.dot(b, out=self._tmp_div_B) + # div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) # L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) # self.update_scalar("tot_div_B", L2_div_B) - en_thermo_l1 = self.mass_ops.M3.dot_inner(p, self._integrator) / (gamma - 1.0) + en_thermo_l1 = self.mass_ops.M3.dot_inner(self.pointer["mhd_p3"], self._integrator) / (self._gamma - 1.0) self.update_scalar("en_thermo_l1", en_thermo_l1) - en_mag_l1 = self.mass_ops.M2.dot_inner(b, self.projected_equil.b2) + en_mag_l1 = self.mass_ops.M2.dot_inner(self.pointer["b2"], self.projected_equil.b2) self.update_scalar("en_mag_l1", en_mag_l1) en_tot_l1 = en_thermo_l1 + en_mag_l1 self.update_scalar("en_tot_l1", en_tot_l1) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='linear')\n", - ] - elif "variat_pb.Options" in line: - new_file += [ - "model.propagators.variat_pb.options = model.propagators.variat_pb.Options(model='linear',\n", - ] - new_file += [ - " div_u=model.diagnostics.div_u,\n", - ] - new_file += [ - " u2=model.diagnostics.u2,\n", - ] - new_file += [ - " pt3=model.diagnostics.pt3,\n", - ] - new_file += [ - " bt2=model.diagnostics.bt2)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(model='linear_p',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(model='linear_p',\n", - ] - new_file += [ - " rho=model.mhd.density,\n", - ] - new_file += [ - " pt3=model.diagnostics.pt3)\n", - ] - elif "pressure.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveDeltafMHD(StruphyModel): + @staticmethod + def diagnostics_dct(): + dct = {} + dct["bt2"] = "Hdiv" + dct["pt3"] = "L2" + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class ViscoresistiveDeltafMHD(StruphyModel): r""":math:`\delta f` visco-resistive MHD equations discretized with a variational method. 
:ref:`normalization`: @@ -1335,106 +1330,141 @@ class ViscoResistiveDeltafMHD(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.pressure = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.pt3 = FEECVariable(space="L2") - self.bt2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_pb = propagators_fields.VariationalPBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "p3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_mom.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.u = self.mhd.velocity - self.propagators.variat_pb.variables.p = self.mhd.pressure - self.propagators.variat_pb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.pressure - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.pressure - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalMomentumAdvection: ["mhd_uv"], + propagators_fields.VariationalPBEvolve: ["mhd_p3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_p3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_p3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalPBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = 
params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "deltaf" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalPBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + "bt2": self.pointer["bt2"], + "pt3": self.pointer["pt3"], + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": "full_p", + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": "delta_p", + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_mag_1") self.add_scalar("en_mag_2") self.add_scalar("en_tot") + # self.add_scalar("dens_tot") + # self.add_scalar("tot_div_B") + self.add_scalar("en_tot_l1") self.add_scalar("en_thermo_l1") self.add_scalar("en_mag_l1") - @property - def bulk_species(self): - return self.mhd + # temporary vectors for scalar quantities + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 - - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -1442,95 +1472,52 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - p = self.mhd.pressure.spline.vector - b = self.em_fields.b_field.spline.vector - bt2 = self.propagators.variat_pb.options.bt2.spline.vector - pt3 = self.propagators.variat_pb.options.pt3.spline.vector - - gamma = self.propagators.variat_pb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) self.update_scalar("en_U", en_U) - en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag_1", en_mag1) - en_mag2 = self.mass_ops.M2.dot_inner(bt2, 
self.projected_equil.b2) + en_mag2 = self.mass_ops.M2.dot_inner(self.pointer["bt2"], self.projected_equil.b2) self.update_scalar("en_mag_2", en_mag2) - en_thermo = self.mass_ops.M3.dot_inner(pt3, self._integrator) / (gamma - 1.0) + en_thermo = self.mass_ops.M3.dot_inner(self.pointer["pt3"], self._integrator) / (self._gamma - 1.0) self.update_scalar("en_thermo", en_thermo) en_tot = en_U + en_thermo + en_mag1 + en_mag2 self.update_scalar("en_tot", en_tot) - # dens_tot = self._ones.inner(rho) + # dens_tot = self._ones.inner(self.pointer["mhd_rho3"]) # self.update_scalar("dens_tot", dens_tot) - # div_B = self.derham.div.dot(b, out=self._tmp_div_B) + # div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) # L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) # self.update_scalar("tot_div_B", L2_div_B) - en_thermo_l1 = self.mass_ops.M3.dot_inner(p, self._integrator) / (gamma - 1.0) + en_thermo_l1 = self.mass_ops.M3.dot_inner(self.pointer["mhd_p3"], self._integrator) / (self._gamma - 1.0) self.update_scalar("en_thermo_l1", en_thermo_l1) - en_mag_l1 = self.mass_ops.M2.dot_inner(b, self.projected_equil.b2) + en_mag_l1 = self.mass_ops.M2.dot_inner(self.pointer["b2"], self.projected_equil.b2) self.update_scalar("en_mag_l1", en_mag_l1) en_tot_l1 = en_thermo_l1 + en_mag_l1 self.update_scalar("en_tot_l1", en_tot_l1) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='deltaf')\n", - ] - elif "variat_pb.Options" in line: - new_file += [ - "model.propagators.variat_pb.options = model.propagators.variat_pb.Options(model='deltaf',\n", - ] - new_file += [ - " pt3=model.diagnostics.pt3,\n", - ] - new_file += [ - " bt2=model.diagnostics.bt2)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(model='full_p',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(model='full_p',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "pressure.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveMHD_with_q(StruphyModel): + @staticmethod + def diagnostics_dct(): + dct = {} + dct["bt2"] = "Hdiv" + dct["pt3"] = "L2" + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class ViscoresistiveMHD_with_q(StruphyModel): r"""Full (non-linear) visco-resistive MHD equations, with the q variable (square root of the pressure) discretized with a variational method. 
:ref:`normalization`: @@ -1566,78 +1553,121 @@ class ViscoResistiveMHD_with_q(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.sqrt_p = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_qb = propagators_fields.VariationalQBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "q3": "L2", "uv": "H1vec"} + return dct - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_mom.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.q = self.mhd.sqrt_p - self.propagators.variat_qb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.sqrt_p - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.sqrt_p - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def bulk_species(): + return "mhd" + + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalMomentumAdvection: ["mhd_uv"], + propagators_fields.VariationalQBEvolve: ["mhd_q3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_q3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_q3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + 
self._alpha = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "full_q" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalQBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_mag") @@ -1645,22 +1675,12 @@ def __init__( self.add_scalar("dens_tot") self.add_scalar("tot_div_B") - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -1668,75 +1688,39 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - q = self.mhd.sqrt_p.spline.vector - b = self.em_fields.b_field.spline.vector - - gamma = self.propagators.variat_qb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) self.update_scalar("en_U", en_U) - en_mag = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag = 0.5 * self._mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag", en_mag) - en_thermo = 1.0 / (gamma - 1.0) * self._mass_ops.M3.dot_inner(q, q) + en_thermo = 1 / (self._gamma - 1) * self._mass_ops.M3.dot_inner(self.pointer["mhd_q3"], self.pointer["mhd_q3"]) self.update_scalar("en_thermo", en_thermo) en_tot = en_U + en_thermo + en_mag self.update_scalar("en_tot", en_tot) - 
dens_tot = self._ones.inner(rho) + dens_tot = self._ones.inner(self.pointer["mhd_rho3"]) self.update_scalar("dens_tot", dens_tot) - div_B = self.derham.div.dot(b, out=self._tmp_div_B) + div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) self.update_scalar("tot_div_B", L2_div_B) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='full_q')\n", - ] - elif "variat_qb.Options" in line: - new_file += [ - "model.propagators.variat_qb.options = model.propagators.variat_qb.Options(model='full_q')\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(model='full_q',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(model='full_q',\n", - ] - new_file += [ - " rho=model.mhd.density)\n", - ] - elif "sqrt_p.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveLinearMHD_with_q(StruphyModel): + @staticmethod + def diagnostics_dct(): + dct = {} + + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class ViscoresistiveLinearMHD_with_q(StruphyModel): r"""Linear visco-resistive MHD equations, with the q variable (square root of the pressure), discretized with a variational method. :ref:`normalization`: @@ -1769,101 +1753,138 @@ class ViscoResistiveLinearMHD_with_q(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.sqrt_p = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.qt3 = FEECVariable(space="L2") - self.bt2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_qb = propagators_fields.VariationalQBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. 
instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "q3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" - # 3. assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.q = self.mhd.sqrt_p - self.propagators.variat_qb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.sqrt_p - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.sqrt_p - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalQBEvolve: ["mhd_q3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_q3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_q3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = 
params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "linear_q" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalQBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + "div_u": self.pointer["div_u"], + "u2": self.pointer["u2"], + "bt2": self.pointer["bt2"], + "qt3": self.pointer["qt3"], + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + "pt3": self.pointer["qt3"], + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + "pt3": self.pointer["qt3"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") - self.add_scalar("en_mag_1") - self.add_scalar("en_mag_2") - self.add_scalar("en_thermo_1") - self.add_scalar("en_thermo_2") + # self.add_scalar("en_thermo_1") + # self.add_scalar("en_thermo_2") + # self.add_scalar("en_mag_1") + # self.add_scalar("en_mag_2") self.add_scalar("en_tot") - @property - def bulk_species(self): - return self.mhd + # self.add_scalar("dens_tot") + # self.add_scalar("tot_div_B") - @property - def velocity_scale(self): - return "alfvén" + # self.add_scalar("en_tot_l1") + # self.add_scalar("en_thermo_l1") + # self.add_scalar("en_mag_l1") - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -1871,94 +1892,56 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - q = self.mhd.sqrt_p.spline.vector - b = self.em_fields.b_field.spline.vector - bt2 = self.propagators.variat_qb.options.bt2.spline.vector - qt3 = self.propagators.variat_qb.options.qt3.spline.vector - - gamma = self.propagators.variat_qb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], self.pointer["mhd_uv"]) 
self.update_scalar("en_U", en_U) - en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(b, b) - self.update_scalar("en_mag_1", en_mag1) + en_mag1 = self._mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) + # self.update_scalar("en_mag_1", en_mag1) - en_mag2 = self.mass_ops.M2.dot_inner(bt2, self.projected_equil.b2) - self.update_scalar("en_mag_2", en_mag2) + en_mag2 = self._mass_ops.M2.dot_inner(self.pointer["bt2"], self.projected_equil.b2) + # self.update_scalar("en_mag_2", en_mag2) - en_th_1 = 1.0 / (gamma - 1.0) * self.mass_ops.M3.dot_inner(q, q) - self.update_scalar("en_thermo_1", en_th_1) + en_th_1 = 1 / (self._gamma - 1) * self._mass_ops.M3.dot_inner(self.pointer["mhd_q3"], self.pointer["mhd_q3"]) + # self.update_scalar("en_thermo_1", en_th_1) - en_th_2 = 2.0 / (gamma - 1.0) * self.mass_ops.M3.dot_inner(qt3, self.projected_equil.q3) - self.update_scalar("en_thermo_2", en_th_2) + en_th_2 = 2 / (self._gamma - 1) * self._mass_ops.M3.dot_inner(self.pointer["qt3"], self.projected_equil.q3) + # self.update_scalar("en_thermo_2", en_th_2) en_tot = en_U + en_th_1 + en_th_2 + en_mag1 + en_mag2 self.update_scalar("en_tot", en_tot) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='linear_q')\n", - ] - elif "variat_qb.Options" in line: - new_file += [ - "model.propagators.variat_qb.options = model.propagators.variat_qb.Options(model='linear_q',\n", - ] - new_file += [ - " div_u=model.diagnostics.div_u,\n", - ] - new_file += [ - " u2=model.diagnostics.u2,\n", - ] - new_file += [ - " qt3=model.diagnostics.qt3,\n", - ] - new_file += [ - " bt2=model.diagnostics.bt2)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(model='linear_q',\n", - ] - new_file += [ - " rho=model.mhd.density,\n", - ] - new_file += [ - " pt3=model.diagnostics.qt3)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(model='linear_q',\n", - ] - new_file += [ - " rho=model.mhd.density,\n", - ] - new_file += [ - " pt3=model.diagnostics.qt3)\n", - ] - elif "sqrt_p.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class ViscoResistiveDeltafMHD_with_q(StruphyModel): + # dens_tot = self._ones.dot(self.pointer["mhd_rho3"]) + # self.update_scalar("dens_tot", dens_tot) + + # div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) + # L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) + # self.update_scalar("tot_div_B", L2_div_B) + + # en_thermo_l1 = self._integrator.dot(self.mass_ops.M3.dot(self.pointer["mhd_p3"])) / (self._gamma - 1.0) + # self.update_scalar("en_thermo_l1", en_thermo_l1) + + # wb2 = self._mass_ops.M2.dot(self.pointer["b2"], out=self._tmp_wb2) + # en_mag_l1 = wb2.dot(self.projected_equil.b2) + # self.update_scalar("en_mag_l1", en_mag_l1) + + # en_tot_l1 = en_thermo_l1 + en_mag_l1 + # self.update_scalar("en_tot_l1", en_tot_l1) + + @staticmethod + def diagnostics_dct(): + dct = {} + dct["bt2"] = "Hdiv" + 
dct["qt3"] = "L2" + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class ViscoresistiveDeltafMHD_with_q(StruphyModel): r"""Linear visco-resistive MHD equations discretized with a variational method. :ref:`normalization`: @@ -1992,103 +1975,147 @@ class ViscoResistiveDeltafMHD_with_q(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.sqrt_p = FEECVariable(space="L2") - self.init_variables() - - class Diagnostics(DiagnosticSpecies): - def __init__(self): - self.div_u = FEECVariable(space="L2") - self.u2 = FEECVariable(space="Hdiv") - self.qt3 = FEECVariable(space="L2") - self.bt2 = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_qb = propagators_fields.VariationalQBEvolve() - if with_viscosity: - self.variat_viscous = propagators_fields.VariationalViscosity() - if with_resistivity: - self.variat_resist = propagators_fields.VariationalResistivity() - - ## abstract methods - - def __init__( - self, - with_viscosity: bool = True, - with_resistivity: bool = True, - ): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.diagnostics = self.Diagnostics() - - # 2. instantiate all propagators - self.propagators = self.Propagators( - with_viscosity=with_viscosity, - with_resistivity=with_resistivity, - ) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"rho3": "L2", "q3": "L2", "uv": "H1vec"} + return dct + + @staticmethod + def bulk_species(): + return "mhd" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.mhd.density - self.propagators.variat_dens.variables.u = self.mhd.velocity - self.propagators.variat_mom.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.u = self.mhd.velocity - self.propagators.variat_qb.variables.q = self.mhd.sqrt_p - self.propagators.variat_qb.variables.b = self.em_fields.b_field - if with_viscosity: - self.propagators.variat_viscous.variables.s = self.mhd.sqrt_p - self.propagators.variat_viscous.variables.u = self.mhd.velocity - if with_resistivity: - self.propagators.variat_resist.variables.s = self.mhd.sqrt_p - self.propagators.variat_resist.variables.b = self.em_fields.b_field - - # define scalars for update_scalar_quantities + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["mhd_rho3", "mhd_uv"], + propagators_fields.VariationalMomentumAdvection: ["mhd_uv"], + propagators_fields.VariationalQBEvolve: ["mhd_q3", "b2", "mhd_uv"], + propagators_fields.VariationalViscosity: ["mhd_q3", "mhd_uv"], + propagators_fields.VariationalResistivity: ["mhd_q3", "b2"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + from struphy.polar.basic import PolarVector + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["mhd"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["lin_solver"] + nonlin_solver_magfield = params["fluid"]["mhd"]["options"]["VariationalQBEvolve"]["nonlin_solver"] + lin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["lin_solver"] + nonlin_solver_viscosity = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["nonlin_solver"] + lin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["lin_solver"] + nonlin_solver_resistivity = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["nonlin_solver"] + if "linearize_current" in params["fluid"]["mhd"]["options"]["VariationalResistivity"].keys(): + self._linearize_current = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["linearize_current"] + else: + self._linearize_current = False + self._gamma = params["fluid"]["mhd"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + self._mu = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu"] + self._mu_a = params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["mu_a"] + self._alpha = 
params["fluid"]["mhd"]["options"]["VariationalViscosity"]["physics"]["alpha"] + self._eta = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta"] + self._eta_a = params["fluid"]["mhd"]["options"]["VariationalResistivity"]["physics"]["eta_a"] + model = "deltaf_q" + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalQBEvolve] = { + "model": model, + "mass_ops": self.WMM, + "lin_solver": lin_solver_magfield, + "nonlin_solver": nonlin_solver_magfield, + "gamma": self._gamma, + "div_u": self.pointer["div_u"], + "u2": self.pointer["u2"], + "bt2": self.pointer["bt2"], + "qt3": self.pointer["qt3"], + } + + self._kwargs[propagators_fields.VariationalViscosity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "mu": self._mu, + "mu_a": self._mu_a, + "alpha": self._alpha, + "mass_ops": self.WMM, + "lin_solver": lin_solver_viscosity, + "nonlin_solver": nonlin_solver_viscosity, + "pt3": self.pointer["qt3"], + } + + self._kwargs[propagators_fields.VariationalResistivity] = { + "model": model, + "rho": self.pointer["mhd_rho3"], + "gamma": self._gamma, + "eta": self._eta, + "eta_a": self._eta_a, + "lin_solver": lin_solver_resistivity, + "nonlin_solver": nonlin_solver_resistivity, + "linearize_current": self._linearize_current, + "pt3": self.pointer["qt3"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation self.add_scalar("en_U") - self.add_scalar("en_mag_1") - self.add_scalar("en_mag_2") self.add_scalar("en_thermo_1") self.add_scalar("en_thermo_2") + self.add_scalar("en_mag_1") + self.add_scalar("en_mag_2") self.add_scalar("en_tot") - @property - def bulk_species(self): - return self.mhd + # self.add_scalar("dens_tot") + # self.add_scalar("tot_div_B") - @property - def velocity_scale(self): - return "alfvén" + # self.add_scalar("en_tot_l1") + # self.add_scalar("en_thermo_l1") + # self.add_scalar("en_mag_l1") - def allocate_helpers(self): - projV3 = L2Projector("L2", self._mass_ops) - - def f(e1, e2, e3): - return 1 + # temporary vectors for scalar quantities + self._tmp_div_B = self.derham.Vh_pol["3"].zeros() + tmp_dof = self.derham.Vh_pol["3"].zeros() + projV3 = L2Projector("L2", self.mass_ops) - f = xp.vectorize(f) - self._integrator = projV3(f) + self._integrator = projV3(self.domain.jacobian_det, dofs=tmp_dof) self._ones = self.derham.Vh_pol["3"].zeros() if isinstance(self._ones, PolarVector): @@ -2096,95 +2123,57 @@ def f(e1, e2, e3): else: self._ones[:] = 1.0 - self._tmp_div_B = self.derham.Vh_pol["3"].zeros() - def update_scalar_quantities(self): - rho = self.mhd.density.spline.vector - u = self.mhd.velocity.spline.vector - q = self.mhd.sqrt_p.spline.vector - b = self.em_fields.b_field.spline.vector - bt2 = self.propagators.variat_qb.options.bt2.spline.vector - qt3 = self.propagators.variat_qb.options.qt3.spline.vector - - gamma = self.propagators.variat_qb.options.gamma - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + # Update mass matrix + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["mhd_uv"], 
self.pointer["mhd_uv"]) self.update_scalar("en_U", en_U) - en_mag1 = 0.5 * self.mass_ops.M2.dot_inner(b, b) + en_mag1 = 0.5 * self._mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_mag_1", en_mag1) - en_mag2 = self.mass_ops.M2.dot_inner(bt2, self.projected_equil.b2) + en_mag2 = 0.5 * self._mass_ops.M2.dot_inner(self.pointer["bt2"], self.projected_equil.b2) self.update_scalar("en_mag_2", en_mag2) - en_th_1 = 1.0 / (gamma - 1.0) * self.mass_ops.M3.dot_inner(q, q) + en_th_1 = 1 / (self._gamma - 1) * self._mass_ops.M3.dot_inner(self.pointer["mhd_q3"], self.pointer["mhd_q3"]) self.update_scalar("en_thermo_1", en_th_1) - en_th_2 = 2.0 / (gamma - 1.0) * self.mass_ops.M3.dot_inner(qt3, self.projected_equil.q3) + en_th_2 = 2 / (self._gamma - 1) * self._mass_ops.M3.dot_inner(self.pointer["qt3"], self.projected_equil.q3) self.update_scalar("en_thermo_2", en_th_2) en_tot = en_U + en_th_1 + en_th_2 + en_mag1 + en_mag2 self.update_scalar("en_tot", en_tot) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='deltaf_q')\n", - ] - elif "variat_qb.Options" in line: - new_file += [ - "model.propagators.variat_qb.options = model.propagators.variat_qb.Options(model='deltaf_q',\n", - ] - new_file += [ - " div_u=model.diagnostics.div_u,\n", - ] - new_file += [ - " u2=model.diagnostics.u2,\n", - ] - new_file += [ - " qt3=model.diagnostics.qt3,\n", - ] - new_file += [ - " bt2=model.diagnostics.bt2)\n", - ] - elif "variat_viscous.Options" in line: - new_file += [ - "model.propagators.variat_viscous.options = model.propagators.variat_viscous.Options(model='deltaf_q',\n", - ] - new_file += [ - " rho=model.mhd.density,\n", - ] - new_file += [ - " pt3=model.diagnostics.qt3)\n", - ] - elif "variat_resist.Options" in line: - new_file += [ - "model.propagators.variat_resist.options = model.propagators.variat_resist.Options(model='deltaf_q',\n", - ] - new_file += [ - " rho=model.mhd.density,\n", - ] - new_file += [ - " pt3=model.diagnostics.qt3)\n", - ] - elif "sqrt_p.add_background" in line: - new_file += ["model.mhd.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - - -class EulerSPH(StruphyModel): - r"""Euler equations discretized with smoothed particle hydrodynamics (SPH). 
+ # dens_tot = self._ones.dot(self.pointer["mhd_rho3"]) + # self.update_scalar("dens_tot", dens_tot) + + # div_B = self.derham.div.dot(self.pointer["b2"], out=self._tmp_div_B) + # L2_div_B = self._mass_ops.M3.dot_inner(div_B, div_B) + # self.update_scalar("tot_div_B", L2_div_B) + + # en_thermo_l1 = self._integrator.dot(self.mass_ops.M3.dot(self.pointer["mhd_p3"])) / (self._gamma - 1.0) + # self.update_scalar("en_thermo_l1", en_thermo_l1) + + # wb2 = self._mass_ops.M2.dot(self.pointer["b2"], out=self._tmp_wb2) + # en_mag_l1 = wb2.dot(self.projected_equil.b2) + # self.update_scalar("en_mag_l1", en_mag_l1) + + # en_tot_l1 = en_thermo_l1 + en_mag_l1 + # self.update_scalar("en_tot_l1", en_tot_l1) + + @staticmethod + def diagnostics_dct(): + dct = {} + dct["bt2"] = "Hdiv" + dct["qt3"] = "L2" + dct["div_u"] = "L2" + dct["u2"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() + + +class IsothermalEulerSPH(StruphyModel): + r"""Isothermal Euler equations discretized with smoothed particle hydrodynamics (SPH). :ref:`normalization`: @@ -2204,71 +2193,155 @@ class EulerSPH(StruphyModel): \partial_t S + \mathbf u \cdot \nabla S &= 0\,, \end{align} - where :math:`S` denotes the entropy per unit mass. - The internal energy per unit mass can be defined in two ways: + where :math:`S` denotes the entropy per unit mass and the internal energy per unit mass is .. math:: - \mathrm{"isothermal:"}\qquad &\mathcal U(\rho, S) = \kappa(S) \log \rho\,. - - \mathrm{"polytropic:"}\qquad &\mathcal U(\rho, S) = \kappa(S) \frac{\rho^{\gamma - 1}}{\gamma - 1}\,. + \mathcal U(\rho, S) = \kappa(S) \log \rho\,. :ref:`propagators` (called in sequence): 1. :class:`~struphy.propagators.propagators_markers.PushEta` 2. :class:`~struphy.propagators.propagators_markers.PushVxB` 3. 
:class:`~struphy.propagators.propagators_markers.PushVinSPHpressure`
+
+    :ref:`Model info `:
+    """
 
-    ## species
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
 
-    class EulerFluid(ParticleSpecies):
-        def __init__(self):
-            self.var = SPHVariable()
-            self.init_variables()
+        dct["kinetic"]["euler_fluid"] = "ParticlesSPH"
+        return dct
 
-    ## propagators
+    @staticmethod
+    def bulk_species():
+        return "euler_fluid"
+
+    @staticmethod
+    def velocity_scale():
+        return "thermal"
+
+    # @staticmethod
+    # def diagnostics_dct():
+    #     dct = {}
+    #     dct["projected_density"] = "L2"
+    #     return dct
 
-    class Propagators:
-        def __init__(self, with_B0: bool = True):
-            self.push_eta = propagators_markers.PushEta()
-            if with_B0:
-                self.push_vxb = propagators_markers.PushVxB()
-            self.push_sph_p = propagators_markers.PushVinSPHpressure()
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_markers.PushEta: ["euler_fluid"],
+            # propagators_markers.PushVxB: ["euler_fluid"],
+            propagators_markers.PushVinSPHpressure: ["euler_fluid"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    def __init__(self, params, comm, clone_config=None):
+        super().__init__(params, comm=comm, clone_config=clone_config)
+
+        # prelim
+        _p = params["kinetic"]["euler_fluid"]
+        algo_eta = _p["options"]["PushEta"]["algo"]
+        # algo_vxb = _p["options"]["PushVxB"]["algo"]
+        kernel_type = _p["options"]["PushVinSPHpressure"]["kernel_type"]
+        algo_sph = _p["options"]["PushVinSPHpressure"]["algo"]
+        gravity = _p["options"]["PushVinSPHpressure"]["gravity"]
+        thermodynamics = _p["options"]["PushVinSPHpressure"]["thermodynamics"]
+
+        # magnetic field
+        # self._b_eq = self.projected_equil.b2
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_markers.PushEta] = {
+            "algo": algo_eta,
+            # "density_field": self.pointer["projected_density"],
+        }
+
+        # self._kwargs[propagators_markers.PushVxB] = {
+        #     "algo": algo_vxb,
+        #     "kappa": 1.0,
+        #     "b2": self._b_eq,
+        #     "b2_add": None,
+        # }
+
+        self._kwargs[propagators_markers.PushVinSPHpressure] = {
+            "kernel_type": kernel_type,
+            "algo": algo_sph,
+            "gravity": gravity,
+            "thermodynamics": thermodynamics,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
+
+        # Scalar variables to be saved during simulation
+        self.add_scalar("en_kin", compute="from_sph", species="euler_fluid")
 
-    ## abstract methods
+    def update_scalar_quantities(self):
+        valid_markers = self.pointer["euler_fluid"].markers_wo_holes_and_ghost
+        en_kin = valid_markers[:, 6].dot(
+            valid_markers[:, 3] ** 2 + valid_markers[:, 4] ** 2 + valid_markers[:, 5] ** 2
+        ) / (2.0 * self.pointer["euler_fluid"].Np)
+        self.update_scalar("en_kin", en_kin)
 
-    def __init__(self, with_B0: bool = True):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
-        self.with_B0 = with_B0
 
+class ViscousEulerSPH(StruphyModel):
+    r"""Isothermal Euler equations with artificial viscosity, discretized with smoothed particle hydrodynamics (SPH).
 
-        # 1. instantiate all species
-        self.euler_fluid = self.EulerFluid()
+    :ref:`normalization`:
 
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators(with_B0=with_B0)
+    .. math::
 
-        # 3. assign variables to propagators
-        self.propagators.push_eta.variables.var = self.euler_fluid.var
-        if with_B0:
-            self.propagators.push_vxb.variables.ions = self.euler_fluid.var
-        self.propagators.push_sph_p.variables.fluid = self.euler_fluid.var
+        \hat u = \hat v_\textnormal{th} \,.
 
-        # define scalars for update_scalar_quantities
-        self.add_scalar("en_kin", compute="from_sph", variable=self.euler_fluid.var)
+    :ref:`Equations `:
 
-    @property
-    def bulk_species(self):
-        return self.euler_fluid
+    .. math::
 
-    @property
-    def velocity_scale(self):
-        return "thermal"
+        \begin{align}
+        \partial_t \rho + \nabla \cdot (\rho \mathbf u) &= 0\,,
+        \\[2mm]
+        \rho(\partial_t \mathbf u + \mathbf u \cdot \nabla \mathbf u) &= - \nabla \left(\rho^2 \frac{\partial \mathcal U(\rho, S)}{\partial \rho} \right)\,,
+        \\[2mm]
+        \partial_t S + \mathbf u \cdot \nabla S &= 0\,,
+        \end{align}
 
-    def allocate_helpers(self):
-        pass
+    where :math:`S` denotes the entropy per unit mass and the internal energy per unit mass is
+
+    .. math::
+
+        \mathcal U(\rho, S) = \kappa(S) \log \rho\,.
+
+    :ref:`propagators` (called in sequence):
+
+    1. :class:`~struphy.propagators.propagators_markers.PushEta`
+    2. :class:`~struphy.propagators.propagators_markers.PushVinSPHpressure`
+    3. :class:`~struphy.propagators.propagators_markers.PushVinViscousPotential`
+
+    :ref:`Model info `:
+    """
+
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
+
+        dct["kinetic"]["euler_fluid"] = "ParticlesSPH"
+        return dct
+
+    @staticmethod
+    def bulk_species():
+        return "euler_fluid"
+
+    @staticmethod
+    def velocity_scale():
+        return "thermal"
 
     # @staticmethod
     # def diagnostics_dct():
@@ -2276,35 +2349,66 @@ def allocate_helpers(self):
     #     dct["projected_density"] = "L2"
    #     return dct
 
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_markers.PushEta: ["euler_fluid"],
+            propagators_markers.PushVinSPHpressure: ["euler_fluid"],
+            propagators_markers.PushVinViscousPotential: ["euler_fluid"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    def __init__(self, params, comm, clone_config=None):
+        super().__init__(params, comm=comm, clone_config=clone_config)
+
+        # prelim
+        _p = params["kinetic"]["euler_fluid"]
+        algo_eta = _p["options"]["PushEta"]["algo"]
+        kernel_type_1 = _p["options"]["PushVinSPHpressure"]["kernel_type"]
+        algo_sph = _p["options"]["PushVinSPHpressure"]["algo"]
+        gravity = _p["options"]["PushVinSPHpressure"]["gravity"]
+        thermodynamics = _p["options"]["PushVinSPHpressure"]["thermodynamics"]
+        kernel_type_2 = _p["options"]["PushVinViscousPotential"]["kernel_type"]
+        kernel_width = _p["options"]["PushVinViscousPotential"]["kernel_width"]
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_markers.PushEta] = {
+            "algo": algo_eta,
+            # "density_field": self.pointer["projected_density"],
+        }
+
+        self._kwargs[propagators_markers.PushVinSPHpressure] = {
+            "kernel_type": kernel_type_1,
+            "algo": algo_sph,
+            "gravity": gravity,
+            "thermodynamics": thermodynamics,
+        }
+
+        self._kwargs[propagators_markers.PushVinViscousPotential] = {
+            "kernel_type": kernel_type_2,
+            "kernel_width": kernel_width,
+            "algo": algo_sph,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
+
+        # Scalar variables to be saved during simulation
+        self.add_scalar("en_kin", compute="from_sph", species="euler_fluid")
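# --- editor's sketch (illustrative, not part of the patch): the
# ``update_scalar_quantities`` method just below contracts the marker array to
# get the kinetic energy; columns 3-5 hold the velocity components and column 6
# the marker weight (layout inferred from the indexing used in this diff). A
# standalone numpy version of the same reduction, with toy data:

import numpy as np

rng = np.random.default_rng(0)
Np = 1000                                # number of markers
markers = rng.standard_normal((Np, 7))   # toy marker array
markers[:, 6] = 1.0                      # toy weights in column 6

# weighted sum of |v|^2 over all markers, normalized as in the model below
v_sq = markers[:, 3] ** 2 + markers[:, 4] ** 2 + markers[:, 5] ** 2
en_kin = markers[:, 6].dot(v_sq) / (2.0 * Np)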
+
+    def update_scalar_quantities(self):
-        particles = self.euler_fluid.var.particles
-        valid_markers = particles.markers_wo_holes_and_ghost
+        valid_markers = self.pointer["euler_fluid"].markers_wo_holes_and_ghost
         en_kin = valid_markers[:, 6].dot(
-            valid_markers[:, 3] ** 2 + valid_markers[:, 4] ** 2 + valid_markers[:, 5] ** 2,
-        ) / (2.0 * particles.Np)
+            valid_markers[:, 3] ** 2 + valid_markers[:, 4] ** 2 + valid_markers[:, 5] ** 2
+        ) / (2.0 * self.pointer["euler_fluid"].Np)
         self.update_scalar("en_kin", en_kin)
 
-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "push_vxb.Options" in line:
-                    new_file += ["if model.with_B0:\n"]
-                    new_file += ["    " + line]
-                elif "set_save_data" in line:
-                    new_file += ["\nkd_plot = KernelDensityPlot()\n"]
-                    new_file += ["model.euler_fluid.set_save_data(kernel_density_plots=(kd_plot,))\n"]
-                elif "base_units = BaseUnits" in line:
-                    new_file += ["base_units = BaseUnits(kBT=1.0)\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
-
 
 class HasegawaWakatani(StruphyModel):
     r"""Hasegawa-Wakatani equations in 2D.
@@ -2336,99 +2440,119 @@ class HasegawaWakatani(StruphyModel):
     :ref:`Model info `:
     """
 
-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
-
-    class Plasma(FluidSpecies):
-        def __init__(self):
-            self.density = FEECVariable(space="H1")
-            self.vorticity = FEECVariable(space="H1")
-            self.init_variables()
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
 
-    ## propagators
+        dct["em_fields"] = {"phi0": "H1"}
+        dct["fluid"]["hw"] = {
+            "n0": "H1",
+            "omega0": "H1",
+        }
+        return dct
 
-    class Propagators:
-        def __init__(self):
-            self.poisson = propagators_fields.Poisson()
-            self.hw = propagators_fields.HasegawaWakatani()
+    @staticmethod
+    def bulk_species():
+        return "hw"
 
-    ## abstract methods
-
-    def __init__(self):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
-
-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.plasma = self.Plasma()
-
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators()
-
-        # 3.
assign variables to propagators - self.propagators.poisson.variables.phi = self.em_fields.phi - self.propagators.hw.variables.n = self.plasma.density - self.propagators.hw.variables.omega = self.plasma.vorticity - - # define scalars for update_scalar_quantities - - @property - def bulk_species(self): - return self.plasma - - @property - def velocity_scale(self): + @staticmethod + def velocity_scale(): return "alfvén" - def allocate_helpers(self): - self._rho: StencilVector = self.derham.Vh["0"].zeros() + # @staticmethod + # def diagnostics_dct(): + # dct = {} + # dct["projected_density"] = "L2" + # return dct + + @staticmethod + def propagators_dct(): + return { + propagators_fields.Poisson: ["phi0"], + propagators_fields.HasegawaWakatani: ["hw_n0", "hw_omega0"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + from struphy.polar.basic import PolarVector + + # extract necessary parameters + self._stab_eps = params["em_fields"]["options"]["Poisson"]["stabilization"]["stab_eps"] + self._stab_mat = params["em_fields"]["options"]["Poisson"]["stabilization"]["stab_mat"] + self._solver = params["em_fields"]["options"]["Poisson"]["solver"] + c_fun = params["fluid"]["hw"]["options"]["HasegawaWakatani"]["c_fun"] + kappa = params["fluid"]["hw"]["options"]["HasegawaWakatani"]["kappa"] + nu = params["fluid"]["hw"]["options"]["HasegawaWakatani"]["nu"] + algo = params["fluid"]["hw"]["options"]["HasegawaWakatani"]["algo"] + M0_solver = params["fluid"]["hw"]["options"]["HasegawaWakatani"]["M0_solver"] + + # rhs of Poisson + self._rho = self.derham.Vh["0"].zeros() self.update_rho() + # set keyword arguments for propagators + self._kwargs[propagators_fields.Poisson] = { + "stab_eps": self._stab_eps, + "stab_mat": self._stab_mat, + "rho": self.update_rho, + "solver": self._solver, + } + + self._kwargs[propagators_fields.HasegawaWakatani] = { + "phi": self.em_fields["phi0"]["obj"], + "c_fun": c_fun, + "kappa": kappa, + "nu": nu, + "algo": algo, + "M0_solver": M0_solver, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + def update_rho(self): - omega = self.plasma.vorticity.spline.vector - self._rho = self.mass_ops.M0.dot(omega, out=self._rho) + self._rho = self.mass_ops.M0.dot(self.pointer["hw_omega0"], out=self._rho) self._rho.update_ghost_regions() return self._rho - def allocate_propagators(self): + def initialize_from_params(self): """Solve initial Poisson equation. :meta private: """ # initialize fields and particles - super().allocate_propagators() + super().initialize_from_params() - if MPI.COMM_WORLD.Get_rank() == 0: + if self.rank_world == 0: print("\nINITIAL POISSON SOLVE:") + # Instantiate Poisson solver + poisson_solver = propagators_fields.Poisson( + self.pointer["phi0"], + stab_eps=self._stab_eps, + stab_mat=self._stab_mat, + rho=self._rho, + solver=self._solver, + ) + + # Solve with dt=1. 
and compute electric field + if self.rank_world == 0: + print("\nSolving initial Poisson problem...") + self.update_rho() - self.propagators.poisson(1.0) + poisson_solver(1.0) - if MPI.COMM_WORLD.Get_rank() == 0: + if self.rank_world == 0: print("Done.") def update_scalar_quantities(self): pass - - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "hw.Options" in line: - new_file += [ - "model.propagators.hw.options = model.propagators.hw.Options(phi=model.em_fields.phi)\n", - ] - elif "vorticity.add_background" in line: - new_file += ["model.plasma.density.add_background(FieldsBackground())\n"] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) diff --git a/src/struphy/models/hybrid.py b/src/struphy/models/hybrid.py index c1952f59c..539f4d285 100644 --- a/src/struphy/models/hybrid.py +++ b/src/struphy/models/hybrid.py @@ -1,17 +1,9 @@ -import cunumpy as xp -from psydac.ddm.mpi import mpi as MPI - from struphy.models.base import StruphyModel -from struphy.models.species import FieldSpecies, FluidSpecies, ParticleSpecies -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable from struphy.pic.accumulation import accum_kernels, accum_kernels_gc -from struphy.pic.accumulation.particles_to_grid import AccumulatorVector -from struphy.polar.basic import PolarVector from struphy.propagators import propagators_coupling, propagators_fields, propagators_markers +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel -rank = MPI.COMM_WORLD.Get_rank() - class LinearMHDVlasovCC(StruphyModel): r""" @@ -69,109 +61,182 @@ class LinearMHDVlasovCC(StruphyModel): :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="Hdiv") - self.pressure = FEECVariable(space="L2") - self.init_variables() - - class EnergeticIons(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles6D") - self.init_variables() - - ## propagators + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class Propagators: - def __init__(self): - self.couple_dens = propagators_fields.CurrentCoupling6DDensity() - self.shear_alf = propagators_fields.ShearAlfven() - self.couple_curr = propagators_coupling.CurrentCoupling6DCurrent() - self.push_eta = propagators_markers.PushEta() - self.push_vxb = propagators_markers.PushVxB() - self.mag_sonic = propagators_fields.Magnetosonic() + dct["em_fields"]["b_field"] = "Hdiv" + dct["fluid"]["mhd"] = {"density": "L2", "velocity": "Hdiv", "pressure": "L2"} + dct["kinetic"]["energetic_ions"] = "Particles6D" + return dct - ## abstract methods + @staticmethod + def bulk_species(): + return "mhd" - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def velocity_scale(): + return "alfvén" - # 1. 
instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.energetic_ions = self.EnergeticIons() + @staticmethod + def propagators_dct(): + return { + propagators_fields.CurrentCoupling6DDensity: ["mhd_velocity"], + propagators_fields.ShearAlfven: ["mhd_velocity", "b_field"], + propagators_coupling.CurrentCoupling6DCurrent: ["energetic_ions", "mhd_velocity"], + propagators_markers.PushEta: ["energetic_ions"], + propagators_markers.PushVxB: ["energetic_ions"], + propagators_fields.Magnetosonic: ["mhd_density", "mhd_velocity", "mhd_pressure"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + # add special options + @classmethod + def options(cls): + dct = super().options() + cls.add_option( + species=["fluid", "mhd"], + key="u_space", + option="Hdiv", + dct=dct, + ) + return dct - # 2. instantiate all propagators - self.propagators = self.Propagators() + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - # 3. assign variables to propagators - self.propagators.couple_dens.variables.u = self.mhd.velocity + from struphy.polar.basic import PolarVector - self.propagators.shear_alf.variables.u = self.mhd.velocity - self.propagators.shear_alf.variables.b = self.em_fields.b_field + # prelim + e_ions_params = self.kinetic["energetic_ions"]["params"] - self.propagators.couple_curr.variables.ions = self.energetic_ions.var - self.propagators.couple_curr.variables.u = self.mhd.velocity + # extract necessary parameters + u_space = params["fluid"]["mhd"]["options"]["u_space"] + params_alfven = params["fluid"]["mhd"]["options"]["ShearAlfven"] + params_sonic = params["fluid"]["mhd"]["options"]["Magnetosonic"] + params_eta = params["kinetic"]["energetic_ions"]["options"]["PushEta"] + params_vxb = params["kinetic"]["energetic_ions"]["options"]["PushVxB"] + params_density = params["fluid"]["mhd"]["options"]["CurrentCoupling6DDensity"] + params_current = params["kinetic"]["energetic_ions"]["options"]["CurrentCoupling6DCurrent"] - self.propagators.push_eta.variables.var = self.energetic_ions.var - self.propagators.push_vxb.variables.ions = self.energetic_ions.var + # compute coupling parameters + Ab = params["fluid"]["mhd"]["phys_params"]["A"] + Ah = params["kinetic"]["energetic_ions"]["phys_params"]["A"] + epsilon = self.equation_params["energetic_ions"]["epsilon"] - self.propagators.mag_sonic.variables.n = self.mhd.density - self.propagators.mag_sonic.variables.u = self.mhd.velocity - self.propagators.mag_sonic.variables.p = self.mhd.pressure + if abs(epsilon - 1) < 1e-6: + epsilon = 1.0 - # define scalars for update_scalar_quantities - self.add_scalar("en_U", compute="from_field") - self.add_scalar("en_p", compute="from_field") - self.add_scalar("en_B", compute="from_field") - self.add_scalar("en_f", compute="from_particles", variable=self.energetic_ions.var) - self.add_scalar("en_tot", summands=["en_U", "en_p", "en_B", "en_f"]) - self.add_scalar("n_lost_particles", compute="from_particles", variable=self.energetic_ions.var) + self._Ab = Ab + self._Ah = Ah - @property - def bulk_species(self): - return self.mhd - - @property - def velocity_scale(self): - return "alfvén" + # add control variate to mass_ops object + if 
self.pointer["energetic_ions"].control_variate: + self.mass_ops.weights["f0"] = self.pointer["energetic_ions"].f0 + + # project background magnetic field (2-form) and background pressure (3-form) + self._b_eq = self.derham.P["2"]( + [ + self.equil.b2_1, + self.equil.b2_2, + self.equil.b2_3, + ] + ) + self._p_eq = self.derham.P["3"](self.equil.p3) + self._ones = self._p_eq.space.zeros() - def allocate_helpers(self): - self._ones = self.projected_equil.p3.space.zeros() if isinstance(self._ones, PolarVector): self._ones.tp[:] = 1.0 else: self._ones[:] = 1.0 - self._tmp = xp.empty(1, dtype=float) - self._n_lost_particles = xp.empty(1, dtype=float) + # set keyword arguments for propagators + if params_density["turn_off"]: + self._kwargs[propagators_fields.CurrentCoupling6DDensity] = None + else: + self._kwargs[propagators_fields.CurrentCoupling6DDensity] = { + "particles": self.pointer["energetic_ions"], + "u_space": u_space, + "b_eq": self._b_eq, + "b_tilde": self.pointer["b_field"], + "Ab": Ab, + "Ah": Ah, + "epsilon": epsilon, + "solver": params_density["solver"], + "filter": params_density["filter"], + "boundary_cut": params_density["boundary_cut"], + } + + if params_alfven["turn_off"]: + self._kwargs[propagators_fields.ShearAlfven] = None + else: + self._kwargs[propagators_fields.ShearAlfven] = { + "u_space": u_space, + "solver": params_alfven["solver"], + } - # add control variate to mass_ops object - if self.energetic_ions.var.particles.control_variate: - self.mass_ops.weights["f0"] = self.energetic_ions.var.particles.f0 + if params_current["turn_off"]: + self._kwargs[propagators_coupling.CurrentCoupling6DCurrent] = None + else: + self._kwargs[propagators_coupling.CurrentCoupling6DCurrent] = { + "u_space": u_space, + "b_eq": self._b_eq, + "b_tilde": self.pointer["b_field"], + "Ab": Ab, + "Ah": Ah, + "epsilon": epsilon, + "solver": params_current["solver"], + "filter": params_current["filter"], + "boundary_cut": params_current["boundary_cut"], + } + + self._kwargs[propagators_markers.PushEta] = { + "algo": params_eta["algo"], + } + + self._kwargs[propagators_markers.PushVxB] = { + "algo": params_vxb["algo"], + "kappa": 1.0 / epsilon, + "b2": self.pointer["b_field"], + "b2_add": self._b_eq, + } + + if params_sonic["turn_off"]: + self._kwargs[propagators_fields.Magnetosonic] = None + else: + self._kwargs[propagators_fields.Magnetosonic] = { + "u_space": u_space, + "b": self.pointer["b_field"], + "solver": params_sonic["solver"], + } - self._Ah = self.energetic_ions.mass_number - self._Ab = self.mhd.mass_number + # Initialize propagators used in splitting substeps + self.init_propagators() + + # Scalar variables to be saved during simulation: + self.add_scalar("en_U", compute="from_field") + self.add_scalar("en_p", compute="from_field") + self.add_scalar("en_B", compute="from_field") + self.add_scalar("en_f", compute="from_particles", species="energetic_ions") + self.add_scalar("en_tot", summands=["en_U", "en_p", "en_B", "en_f"]) + self.add_scalar("n_lost_particles", compute="from_particles", species="energetic_ions") + + # temporary vectors for scalar quantities: + self._tmp = np.empty(1, dtype=float) + self._n_lost_particles = np.empty(1, dtype=float) def update_scalar_quantities(self): # perturbed fields - u = self.mhd.velocity.spline.vector - p = self.mhd.pressure.spline.vector - b = self.em_fields.b_field.spline.vector - particles = self.energetic_ions.var.particles - - en_U = 0.5 * self.mass_ops.M2n.dot_inner(u, u) - en_B = 0.5 * self.mass_ops.M2.dot_inner(b, b) - en_p = 
p.inner(self._ones) / (5 / 3 - 1) + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_velocity"], self.pointer["mhd_velocity"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) + en_p = self.pointer["mhd_pressure"].inner(self._ones) / (5 / 3 - 1) self.update_scalar("en_U", en_U) self.update_scalar("en_B", en_B) @@ -181,10 +246,12 @@ def update_scalar_quantities(self): self._tmp[0] = ( self._Ah / self._Ab - * particles.markers_wo_holes[:, 6].dot( - particles.markers_wo_holes[:, 3] ** 2 - + particles.markers_wo_holes[:, 4] ** 2 - + particles.markers_wo_holes[:, 5] ** 2, + * self.pointer["energetic_ions"] + .markers_wo_holes[:, 6] + .dot( + self.pointer["energetic_ions"].markers_wo_holes[:, 3] ** 2 + + self.pointer["energetic_ions"].markers_wo_holes[:, 4] ** 2 + + self.pointer["energetic_ions"].markers_wo_holes[:, 5] ** 2, ) / (2) ) @@ -193,47 +260,16 @@ def update_scalar_quantities(self): self.update_scalar("en_tot", en_U + en_B + en_p + self._tmp[0]) # Print number of lost ions - self._n_lost_particles[0] = particles.n_lost_markers + self._n_lost_particles[0] = self.pointer["energetic_ions"].n_lost_markers self.update_scalar("n_lost_particles", self._n_lost_particles[0]) - if rank == 0: + if self.rank_world == 0: print( "ratio of lost particles: ", - self._n_lost_particles[0] / particles.Np * 100, + self._n_lost_particles[0] / self.pointer["energetic_ions"].Np * 100, "%", ) - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "mag_sonic.Options" in line: - new_file += [ - "model.propagators.mag_sonic.options = model.propagators.mag_sonic.Options(b_field=model.em_fields.b_field)\n", - ] - elif "couple_dens.Options" in line: - new_file += [ - "model.propagators.couple_dens.options = model.propagators.couple_dens.Options(energetic_ions=model.energetic_ions.var,\n", - ] - new_file += [ - " b_tilde=model.em_fields.b_field)\n", - ] - elif "couple_curr.Options" in line: - new_file += [ - "model.propagators.couple_curr.options = model.propagators.couple_curr.Options(b_tilde=model.em_fields.b_field)\n", - ] - elif "set_save_data" in line: - new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"] - new_file += ["model.energetic_ions.set_save_data(binning_plots=(binplot,))\n"] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class LinearMHDVlasovPC(StruphyModel): r""" @@ -297,192 +333,209 @@ class LinearMHDVlasovPC(StruphyModel): :ref:`Model info `: """ - ## species - class EnergeticIons(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles6D") - self.init_variables() - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.pressure = FEECVariable(space="L2") - self.velocity = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self, turn_off: tuple[str, ...] 
= (None,)): - if "PushEtaPC" not in turn_off: - self.push_eta_pc = propagators_markers.PushEtaPC() - if "PushVxB" not in turn_off: - self.push_vxb = propagators_markers.PushVxB() - if "PressureCoupling6D" not in turn_off: - self.pc6d = propagators_coupling.PressureCoupling6D() - if "ShearAlfven" not in turn_off: - self.shearalfven = propagators_fields.ShearAlfven() - if "Magnetosonic" not in turn_off: - self.magnetosonic = propagators_fields.Magnetosonic() - - def __init__(self, turn_off: tuple[str, ...] = (None,)): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.energetic_ions = self.EnergeticIons() - - # 2. instantiate all propagators - self.propagators = self.Propagators(turn_off) - - # 3. assign variables to propagators - if "ShearAlfven" not in turn_off: - self.propagators.shearalfven.variables.u = self.mhd.velocity - self.propagators.shearalfven.variables.b = self.em_fields.b_field - if "Magnetosonic" not in turn_off: - self.propagators.magnetosonic.variables.n = self.mhd.density - self.propagators.magnetosonic.variables.u = self.mhd.velocity - self.propagators.magnetosonic.variables.p = self.mhd.pressure - if "PressureCoupling6D" not in turn_off: - self.propagators.pc6d.variables.u = self.mhd.velocity - self.propagators.pc6d.variables.energetic_ions = self.energetic_ions.var - if "PushEtaPC" not in turn_off: - self.propagators.push_eta_pc.variables.var = self.energetic_ions.var - if "PushVxB" not in turn_off: - self.propagators.push_vxb.variables.ions = self.energetic_ions.var - - # define scalars for update_scalar_quantities - self.add_scalar("en_U") - self.add_scalar("en_p") - self.add_scalar("en_B") - self.add_scalar("en_f", compute="from_particles", variable=self.energetic_ions.var) - self.add_scalar( - "en_tot", - summands=[ - "en_U", - "en_p", - "en_B", - "en_f", - ], + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + + dct["em_fields"]["b_field"] = "Hdiv" + dct["fluid"]["mhd"] = { + "density": "L2", + "velocity": "Hdiv", + "pressure": "L2", + } + dct["kinetic"]["energetic_ions"] = "Particles6D" + return dct + + @staticmethod + def bulk_species(): + return "mhd" + + @staticmethod + def velocity_scale(): + return "alfvén" + + @staticmethod + def propagators_dct(): + return { + propagators_markers.PushEtaPC: ["energetic_ions"], + propagators_markers.PushVxB: ["energetic_ions"], + propagators_coupling.PressureCoupling6D: ["energetic_ions", "mhd_velocity"], + propagators_fields.ShearAlfven: ["mhd_velocity", "b_field"], + propagators_fields.Magnetosonic: ["mhd_density", "mhd_velocity", "mhd_pressure"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + # add special options + @classmethod + def options(cls): + dct = super().options() + cls.add_option( + species=["fluid", "mhd"], + key="u_space", + option="Hdiv", + dct=dct, ) + return dct - @property - def bulk_species(self): - return self.mhd + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - @property - def velocity_scale(self): - return "alfvén" + from struphy.polar.basic import PolarVector + + # extract necessary 
parameters
+        u_space = params["fluid"]["mhd"]["options"]["u_space"]
+        self._u_space = u_space  # needed in update_scalar_quantities
+        params_alfven = params["fluid"]["mhd"]["options"]["ShearAlfven"]
+        params_sonic = params["fluid"]["mhd"]["options"]["Magnetosonic"]
+        params_vxb = params["kinetic"]["energetic_ions"]["options"]["PushVxB"]
+        params_pressure = params["kinetic"]["energetic_ions"]["options"]["PressureCoupling6D"]
+
+        # use perp model
+        use_perp_model = params["kinetic"]["energetic_ions"]["options"]["PressureCoupling6D"]["use_perp_model"]
+
+        # compute coupling parameters
+        Ab = params["fluid"]["mhd"]["phys_params"]["A"]
+        Ah = params["kinetic"]["energetic_ions"]["phys_params"]["A"]
+        epsilon = self.equation_params["energetic_ions"]["epsilon"]
+
+        if abs(epsilon - 1) < 1e-6:
+            epsilon = 1.0
+
+        self._coupling_params = {}
+        self._coupling_params["Ab"] = Ab
+        self._coupling_params["Ah"] = Ah
+        self._coupling_params["epsilon"] = epsilon
+
+        # add control variate to mass_ops object
+        if self.pointer["energetic_ions"].control_variate:
+            self.mass_ops.weights["f0"] = self.pointer["energetic_ions"].f0
+
+        # Project magnetic field
+        self._b_eq = self.derham.P["2"](
+            [
+                self.equil.b2_1,
+                self.equil.b2_2,
+                self.equil.b2_3,
+            ]
+        )
+        self._p_eq = self.derham.P["3"](self.equil.p3)
+        self._ones = self._p_eq.space.zeros()
 
-    def allocate_helpers(self):
-        self._ones = self.projected_equil.p3.space.zeros()
         if isinstance(self._ones, PolarVector):
             self._ones.tp[:] = 1.0
         else:
             self._ones[:] = 1.0
 
-        self._en_f = xp.empty(1, dtype=float)
-        self._n_lost_particles = xp.empty(1, dtype=float)
+        # set keyword arguments for propagators
+        self._kwargs[propagators_markers.PushEtaPC] = {
+            "u": self.pointer["mhd_velocity"],
+            "use_perp_model": use_perp_model,
+            "u_space": u_space,
+        }
+
+        self._kwargs[propagators_markers.PushVxB] = {
+            "algo": params_vxb["algo"],
+            "kappa": epsilon,
+            "b2": self.pointer["b_field"],
+            "b2_add": self._b_eq,
+        }
+
+        if params_pressure["turn_off"]:
+            self._kwargs[propagators_coupling.PressureCoupling6D] = None
+        else:
+            self._kwargs[propagators_coupling.PressureCoupling6D] = {
+                "use_perp_model": use_perp_model,
+                "u_space": u_space,
+                "solver": params_pressure["solver"],
+                "coupling_params": self._coupling_params,
+                "filter": params_pressure["filter"],
+                "boundary_cut": params_pressure["boundary_cut"],
+            }
+
+        if params_alfven["turn_off"]:
+            self._kwargs[propagators_fields.ShearAlfven] = None
+        else:
+            self._kwargs[propagators_fields.ShearAlfven] = {
+                "u_space": u_space,
+                "solver": params_alfven["solver"],
+            }
 
-    def update_scalar_quantities(self):
-        # scaling factor
-        Ab = self.mhd.mass_number
-        Ah = self.energetic_ions.var.species.mass_number
+        if params_sonic["turn_off"]:
+            self._kwargs[propagators_fields.Magnetosonic] = None
+        else:
+            self._kwargs[propagators_fields.Magnetosonic] = {
+                "b": self.pointer["b_field"],
+                "u_space": u_space,
+                "solver": params_sonic["solver"],
+            }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
 
+        # Scalar variables to be saved during simulation:
+        self.add_scalar("en_U", compute="from_field")
+        self.add_scalar("en_p", compute="from_field")
+        self.add_scalar("en_B", compute="from_field")
+        self.add_scalar("en_f", compute="from_particles", species="energetic_ions")
+        self.add_scalar("en_tot", summands=["en_U", "en_p", "en_B", "en_f"])
+        self.add_scalar("n_lost_particles", compute="from_particles", species="energetic_ions")
+
+        # temporary vectors for scalar quantities
+        self._tmp_u = self.derham.Vh["2"].zeros()
+        self._tmp_b1 = self.derham.Vh["2"].zeros()
+        self._tmp = np.empty(1, dtype=float)
+        self._n_lost_particles = np.empty(1, dtype=float)
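# --- editor's sketch (illustrative, not part of the patch): the ``__init__``
# above configures each splitting substep by storing either a kwargs dict or
# ``None`` (substep disabled via ``turn_off``) under the propagator class in
# ``self._kwargs``, before ``init_propagators()`` instantiates them in order.
# A minimal standalone version of that dispatch; ``PushA``/``PushB`` and the
# ``options`` dict are stand-ins, not struphy classes:

class PushA:
    def __init__(self, **kwargs):
        self.kwargs = kwargs


class PushB:
    def __init__(self, **kwargs):
        self.kwargs = kwargs


options = {
    "PushA": {"turn_off": False, "algo": "rk4"},
    "PushB": {"turn_off": True, "algo": "implicit"},
}

kwargs_per_prop = {}
for cls in (PushA, PushB):
    opts = options[cls.__name__]
    # None disables the substep entirely, mirroring the pattern above
    kwargs_per_prop[cls] = None if opts["turn_off"] else {"algo": opts["algo"]}

# "init_propagators": instantiate only the enabled substeps, in order
propagators = [cls(**kw) for cls, kw in kwargs_per_prop.items() if kw is not None]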
compute="from_particles", species="energetic_ions") + + # temporary vectors for scalar quantities + self._tmp_u = self.derham.Vh["2"].zeros() + self._tmp_b1 = self.derham.Vh["2"].zeros() + self._tmp = np.empty(1, dtype=float) + self._n_lost_particles = np.empty(1, dtype=float) + + def update_scalar_quantities(self): # perturbed fields - en_U = 0.5 * self.mass_ops.M2n.dot_inner( - self.mhd.velocity.spline.vector, - self.mhd.velocity.spline.vector, - ) - en_B = 0.5 * self.mass_ops.M2.dot_inner( - self.em_fields.b_field.spline.vector, - self.em_fields.b_field.spline.vector, - ) - en_p = self.mhd.pressure.spline.vector.inner(self._ones) / (5 / 3 - 1) + if "Hdiv" == "Hdiv": + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_velocity"], self.pointer["mhd_velocity"]) + else: + en_U = 0.5 * self.mass_ops.Mvn.dot_inner(self.pointer["mhd_velocity"], self.pointer["mhd_velocity"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) + en_p = self.pointer["mhd_pressure"].inner(self._ones) / (5 / 3 - 1) self.update_scalar("en_U", en_U) self.update_scalar("en_B", en_B) self.update_scalar("en_p", en_p) - # particles' energy - particles = self.energetic_ions.var.particles - - self._en_f[0] = ( - particles.markers[~particles.holes, 6].dot( - particles.markers[~particles.holes, 3] ** 2 - + particles.markers[~particles.holes, 4] ** 2 - + particles.markers[~particles.holes, 5] ** 2, + # particles + self._tmp[0] = ( + self._coupling_params["Ah"] + / self._coupling_params["Ab"] + * self.pointer["energetic_ions"] + .markers_wo_holes[:, 6] + .dot( + self.pointer["energetic_ions"].markers_wo_holes[:, 3] ** 2 + + self.pointer["energetic_ions"].markers_wo_holes[:, 4] ** 2 + + self.pointer["energetic_ions"].markers_wo_holes[:, 5] ** 2, ) - / 2.0 - * Ah - / Ab + / (2.0) ) - self.update_scalar("en_f", self._en_f[0]) - self.update_scalar("en_tot") - - # print number of lost particles - n_lost_markers = xp.array(particles.n_lost_markers) - - if self.derham.comm is not None: - self.derham.comm.Allreduce( - MPI.IN_PLACE, - n_lost_markers, - op=MPI.SUM, - ) - - if self.clone_config is not None: - self.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - n_lost_markers, - op=MPI.SUM, - ) + self.update_scalar("en_f", self._tmp[0]) + self.update_scalar("en_tot", en_U + en_B + en_p + self._tmp[0]) - if rank == 0: + # Print number of lost ions + self._n_lost_particles[0] = self.pointer["energetic_ions"].n_lost_markers + self.update_scalar("n_lost_particles", self._n_lost_particles[0]) + if self.rank_world == 0: print( - "Lost particle ratio: ", - n_lost_markers / particles.Np * 100, - "% \n", + "ratio of lost particles: ", + self._n_lost_particles[0] / self.pointer["energetic_ions"].Np * 100, + "%", ) - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "magnetosonic.Options" in line: - new_file += [ - """model.propagators.magnetosonic.options = model.propagators.magnetosonic.Options( - b_field=model.em_fields.b_field,)\n""", - ] - - elif "push_eta_pc.Options" in line: - new_file += [ - """model.propagators.push_eta_pc.options = model.propagators.push_eta_pc.Options( - u_tilde = model.mhd.velocity,)\n""", - ] - - elif "push_vxb.Options" in line: - new_file += [ - """model.propagators.push_vxb.options = model.propagators.push_vxb.Options( - b2_var = model.em_fields.b_field,)\n""", - ] - - 
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
-
 
 class LinearMHDDriftkineticCC(StruphyModel):
     r"""Hybrid linear ideal MHD + energetic ions (5D Driftkinetic) with **current coupling scheme**.
@@ -505,13 +558,13 @@ class LinearMHDDriftkineticCC(StruphyModel):
         &\frac{\partial \tilde{\rho}}{\partial t}+\nabla\cdot(\rho_{0} \tilde{\mathbf{U}})=0\,,
         \\
         \rho_{0} &\frac{\partial \tilde{\mathbf{U}}}{\partial t} - \tilde p\, \nabla
-        = (\nabla \times \tilde{\mathbf{B}}) \times \mathbf{B} + (\nabla \times \mathbf B_0) \times \tilde{\mathbf{B}}
+        = (\nabla \times \tilde{\mathbf{B}}) \times \mathbf{B}_0 + (\nabla \times \mathbf B_0) \times \tilde{\mathbf{B}}
         + \frac{A_\textnormal{h}}{A_\textnormal{b}} \left[ \frac{1}{\epsilon} n_\textnormal{gc} \tilde{\mathbf{U}} - \frac{1}{\epsilon} \mathbf{J}_\textnormal{gc} - \nabla \times \mathbf{M}_\textnormal{gc} \right] \times \mathbf{B} \,,
         \\
         &\frac{\partial \tilde p}{\partial t} + \nabla\cdot(p_0 \tilde{\mathbf{U}}) + \frac{2}{3}\,p_0\nabla\cdot \tilde{\mathbf{U}}=0\,,
         \\
-        &\frac{\partial \tilde{\mathbf{B}}}{\partial t} - \nabla\times(\tilde{\mathbf{U}} \times \mathbf{B})
+        &\frac{\partial \tilde{\mathbf{B}}}{\partial t} - \nabla\times(\tilde{\mathbf{U}} \times \mathbf{B}_0)
         = 0\,,
         \end{aligned}
         \right.
@@ -524,7 +577,7 @@ class LinearMHDDriftkineticCC(StruphyModel):
         \\
         & n_\textnormal{gc} = \int f_\textnormal{h} B_\parallel^* \,\textnormal dv_\parallel \textnormal d\mu \,,
         \\
-        & \mathbf{J}_\textnormal{gc} = \int \frac{f_\textnormal{h}}{B_\parallel^*}(v_\parallel \mathbf{B}^* - \mathbf{b}_0 \times \mathbf{E}^*) \,\textnormal dv_\parallel \textnormal d\mu \,,
+        & \mathbf{J}_\textnormal{gc} = \int f_\textnormal{h}(v_\parallel \mathbf{B}^* - \mathbf{b}_0 \times \mathbf{E}^*) \,\textnormal dv_\parallel \textnormal d\mu \,,
         \\
         & \mathbf{M}_\textnormal{gc} = - \int f_\textnormal{h} B_\parallel^* \mu \mathbf{b}_0 \,\textnormal dv_\parallel \textnormal d\mu \,,
         \end{aligned}
@@ -536,11 +589,9 @@ class LinearMHDDriftkineticCC(StruphyModel):
     .. math::
 
         \begin{align}
-        B^*_\parallel = \mathbf{b}_0 \cdot \mathbf{B}^*\,,
-        \\[2mm]
-        \mathbf{B}^* &= \mathbf{B} + \epsilon v_\parallel \nabla \times \mathbf{b}_0 \,,
+        \mathbf{B}^* &= \mathbf{B} + \epsilon v_\parallel \nabla \times \mathbf{b}_0 \,,\qquad B^*_\parallel = \mathbf{b}_0 \cdot \mathbf{B}^*\,,
         \\[2mm]
-        \mathbf{E}^* &= - \tilde{\mathbf{U}} \times \mathbf{B} - \epsilon \mu \nabla (\mathbf{b}_0 \cdot \mathbf{B}) \,,
+        \mathbf{E}^* &= - \tilde{\mathbf{U}} \times \mathbf{B} - \epsilon \mu \nabla B_\parallel \,,
         \end{align}
 
     with the normalization parameter
@@ -557,238 +608,335 @@ class LinearMHDDriftkineticCC(StruphyModel):
     4. :class:`~struphy.propagators.propagators_coupling.CurrentCoupling5DCurlb`
     5. :class:`~struphy.propagators.propagators_fields.CurrentCoupling5DDensity`
     6. :class:`~struphy.propagators.propagators_fields.ShearAlfvenCurrentCoupling5D`
-    7. :class:`~struphy.propagators.propagators_fields.Magnetosonic`
+    7.
:class:`~struphy.propagators.propagators_fields.MagnetosonicCurrentCoupling5D` :ref:`Model info `: """ - ## species - class EnergeticIons(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles5D") - self.init_variables() - - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.pressure = FEECVariable(space="L2") - self.velocity = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self, turn_off: tuple[str, ...] = (None,)): - if "PushGuidingCenterBxEstar" not in turn_off: - self.push_bxe = propagators_markers.PushGuidingCenterBxEstar() - if "PushGuidingCenterParallel" not in turn_off: - self.push_parallel = propagators_markers.PushGuidingCenterParallel() - if "ShearAlfvenCurrentCoupling5D" not in turn_off: - self.shearalfen_cc5d = propagators_fields.ShearAlfvenCurrentCoupling5D() - if "Magnetosonic" not in turn_off: - self.magnetosonic = propagators_fields.Magnetosonic() - if "CurrentCoupling5DDensity" not in turn_off: - self.cc5d_density = propagators_fields.CurrentCoupling5DDensity() - if "CurrentCoupling5DGradB" not in turn_off: - self.cc5d_gradb = propagators_coupling.CurrentCoupling5DGradB() - if "CurrentCoupling5DCurlb" not in turn_off: - self.cc5d_curlb = propagators_coupling.CurrentCoupling5DCurlb() - - def __init__(self, turn_off: tuple[str, ...] = (None,)): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() - self.energetic_ions = self.EnergeticIons() - - # 2. instantiate all propagators - self.propagators = self.Propagators(turn_off) - - # 3. 
assign variables to propagators - if "ShearAlfvenCurrentCoupling5D" not in turn_off: - self.propagators.shearalfen_cc5d.variables.u = self.mhd.velocity - self.propagators.shearalfen_cc5d.variables.b = self.em_fields.b_field - if "Magnetosonic" not in turn_off: - self.propagators.magnetosonic.variables.n = self.mhd.density - self.propagators.magnetosonic.variables.u = self.mhd.velocity - self.propagators.magnetosonic.variables.p = self.mhd.pressure - if "CurrentCoupling5DDensity" not in turn_off: - self.propagators.cc5d_density.variables.u = self.mhd.velocity - if "CurrentCoupling5DGradB" not in turn_off: - self.propagators.cc5d_gradb.variables.u = self.mhd.velocity - self.propagators.cc5d_gradb.variables.energetic_ions = self.energetic_ions.var - if "CurrentCoupling5DCurlb" not in turn_off: - self.propagators.cc5d_curlb.variables.u = self.mhd.velocity - self.propagators.cc5d_curlb.variables.energetic_ions = self.energetic_ions.var - if "PushGuidingCenterBxEstar" not in turn_off: - self.propagators.push_bxe.variables.ions = self.energetic_ions.var - if "PushGuidingCenterParallel" not in turn_off: - self.propagators.push_parallel.variables.ions = self.energetic_ions.var - - # define scalars for update_scalar_quantities - self.add_scalar("en_U") - self.add_scalar("en_p") - self.add_scalar("en_B") - self.add_scalar("en_fv", compute="from_particles", variable=self.energetic_ions.var) - self.add_scalar("en_fB", compute="from_particles", variable=self.energetic_ions.var) - self.add_scalar("en_tot", summands=["en_U", "en_p", "en_B", "en_fv", "en_fB"]) + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + + dct["em_fields"]["b_field"] = "Hdiv" + dct["fluid"]["mhd"] = { + "density": "L2", + "velocity": "Hdiv", + "pressure": "L2", + } + dct["kinetic"]["energetic_ions"] = "Particles5D" + return dct + + @staticmethod + def bulk_species(): + return "mhd" + + @staticmethod + def velocity_scale(): + return "alfvén" - @property - def bulk_species(self): - return self.mhd + @staticmethod + def propagators_dct(): + return { + propagators_markers.PushGuidingCenterBxEstar: ["energetic_ions"], + propagators_markers.PushGuidingCenterParallel: ["energetic_ions"], + propagators_coupling.CurrentCoupling5DGradB: ["energetic_ions", "mhd_velocity"], + propagators_coupling.CurrentCoupling5DCurlb: ["energetic_ions", "mhd_velocity"], + propagators_fields.CurrentCoupling5DDensity: ["mhd_velocity"], + propagators_fields.ShearAlfvenCurrentCoupling5D: ["mhd_velocity", "b_field"], + propagators_fields.MagnetosonicCurrentCoupling5D: ["mhd_density", "mhd_velocity", "mhd_pressure"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + # add special options + @classmethod + def options(cls): + dct = super().options() + cls.add_option( + species=["fluid", "mhd"], + key="u_space", + option="Hdiv", + dct=dct, + ) + return dct - @property - def velocity_scale(self): - return "alfvén" + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + from struphy.polar.basic import PolarVector + + # extract necessary parameters + u_space = params["fluid"]["mhd"]["options"]["u_space"] + params_alfven = params["fluid"]["mhd"]["options"]["ShearAlfvenCurrentCoupling5D"] + params_sonic = 
params["fluid"]["mhd"]["options"]["MagnetosonicCurrentCoupling5D"] + params_density = params["fluid"]["mhd"]["options"]["CurrentCoupling5DDensity"] + + params_bxE = params["kinetic"]["energetic_ions"]["options"]["PushGuidingCenterBxEstar"] + params_parallel = params["kinetic"]["energetic_ions"]["options"]["PushGuidingCenterParallel"] + params_cc_gradB = params["kinetic"]["energetic_ions"]["options"]["CurrentCoupling5DGradB"] + params_cc_curlb = params["kinetic"]["energetic_ions"]["options"]["CurrentCoupling5DCurlb"] + params_cc_gradB = params["kinetic"]["energetic_ions"]["options"]["CurrentCoupling5DGradB"] + + # compute coupling parameters + Ab = params["fluid"]["mhd"]["phys_params"]["A"] + Ah = params["kinetic"]["energetic_ions"]["phys_params"]["A"] + epsilon = self.equation_params["energetic_ions"]["epsilon"] + + self._coupling_params = {} + self._coupling_params["Ab"] = Ab + self._coupling_params["Ah"] = Ah + + # add control variate to mass_ops object + if self.pointer["energetic_ions"].control_variate: + self.mass_ops.weights["f0"] = self.pointer["energetic_ions"].f0 + + # Project magnetic field + self._b_eq = self.derham.P["2"]( + [ + self.equil.b2_1, + self.equil.b2_2, + self.equil.b2_3, + ] + ) + + self._absB0 = self.derham.P["0"](self.equil.absB0) + + self._unit_b1 = self.derham.P["1"]( + [ + self.equil.unit_b1_1, + self.equil.unit_b1_2, + self.equil.unit_b1_3, + ] + ) + + self._unit_b2 = self.derham.P["2"]( + [ + self.equil.unit_b2_1, + self.equil.unit_b2_2, + self.equil.unit_b2_3, + ] + ) + + self._gradB1 = self.derham.P["1"]( + [ + self.equil.gradB1_1, + self.equil.gradB1_2, + self.equil.gradB1_3, + ] + ) + + self._curl_unit_b2 = self.derham.P["2"]( + [ + self.equil.curl_unit_b2_1, + self.equil.curl_unit_b2_2, + self.equil.curl_unit_b2_3, + ] + ) + + self._p_eq = self.derham.P["3"](self.equil.p3) + self._ones = self._p_eq.space.zeros() - def allocate_helpers(self): - self._ones = self.projected_equil.p3.space.zeros() if isinstance(self._ones, PolarVector): self._ones.tp[:] = 1.0 else: self._ones[:] = 1.0 - self._en_fv = xp.empty(1, dtype=float) - self._en_fB = xp.empty(1, dtype=float) - self._en_tot = xp.empty(1, dtype=float) - self._n_lost_particles = xp.empty(1, dtype=float) + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushGuidingCenterBxEstar] = { + "b_tilde": self.pointer["b_field"], + "algo": params_bxE["algo"], + "epsilon": epsilon, + } + + self._kwargs[propagators_markers.PushGuidingCenterParallel] = { + "b_tilde": self.pointer["b_field"], + "algo": params_parallel["algo"], + "epsilon": epsilon, + } + + if params_cc_gradB["turn_off"]: + self._kwargs[propagators_coupling.CurrentCoupling5DGradB] = None + else: + self._kwargs[propagators_coupling.CurrentCoupling5DGradB] = { + "b": self.pointer["b_field"], + "b_eq": self._b_eq, + "unit_b1": self._unit_b1, + "unit_b2": self._unit_b2, + "absB0": self._absB0, + "gradB1": self._gradB1, + "curl_unit_b2": self._curl_unit_b2, + "u_space": u_space, + "solver": params_cc_gradB["solver"], + "algo": params_cc_gradB["algo"], + "filter": params_cc_gradB["filter"], + "coupling_params": self._coupling_params, + "epsilon": epsilon, + "boundary_cut": params_cc_gradB["boundary_cut"], + } + + if params_cc_curlb["turn_off"]: + self._kwargs[propagators_coupling.CurrentCoupling5DCurlb] = None + else: + self._kwargs[propagators_coupling.CurrentCoupling5DCurlb] = { + "b": self.pointer["b_field"], + "b_eq": self._b_eq, + "unit_b1": self._unit_b1, + "absB0": self._absB0, + "gradB1": self._gradB1, + "curl_unit_b2": 
self._curl_unit_b2, + "u_space": u_space, + "solver": params_cc_curlb["solver"], + "filter": params_cc_curlb["filter"], + "coupling_params": self._coupling_params, + "epsilon": epsilon, + "boundary_cut": params_cc_curlb["boundary_cut"], + } + + if params_density["turn_off"]: + self._kwargs[propagators_fields.CurrentCoupling5DDensity] = None + else: + self._kwargs[propagators_fields.CurrentCoupling5DDensity] = { + "particles": self.pointer["energetic_ions"], + "b": self.pointer["b_field"], + "b_eq": self._b_eq, + "unit_b1": self._unit_b1, + "curl_unit_b2": self._curl_unit_b2, + "u_space": u_space, + "solver": params_density["solver"], + "coupling_params": self._coupling_params, + "epsilon": epsilon, + "boundary_cut": params_density["boundary_cut"], + } + + if params_alfven["turn_off"]: + self._kwargs[propagators_fields.ShearAlfvenCurrentCoupling5D] = None + else: + self._kwargs[propagators_fields.ShearAlfvenCurrentCoupling5D] = { + "particles": self.pointer["energetic_ions"], + "unit_b1": self._unit_b1, + "absB0": self._absB0, + "u_space": u_space, + "solver": params_alfven["solver"], + "filter": params_alfven["filter"], + "coupling_params": self._coupling_params, + "accumulated_magnetization": self.pointer["accumulated_magnetization"], + "boundary_cut": params_alfven["boundary_cut"], + } + + if params_sonic["turn_off"]: + self._kwargs[propagators_fields.MagnetosonicCurrentCoupling5D] = None + else: + self._kwargs[propagators_fields.MagnetosonicCurrentCoupling5D] = { + "particles": self.pointer["energetic_ions"], + "b": self.pointer["b_field"], + "unit_b1": self._unit_b1, + "absB0": self._absB0, + "u_space": u_space, + "solver": params_sonic["solver"], + "filter": params_sonic["filter"], + "coupling_params": self._coupling_params, + "boundary_cut": params_sonic["boundary_cut"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + # Scalar variables to be saved during simulation + self.add_scalar("en_U", compute="from_field") + self.add_scalar("en_p", compute="from_field") + self.add_scalar("en_B", compute="from_field") + self.add_scalar("en_fv", compute="from_particles", species="energetic_ions") + self.add_scalar("en_fB", compute="from_particles", species="energetic_ions") + # self.add_scalar('en_fv_lost', compute = 'from_particles', species='energetic_ions') + # self.add_scalar('en_fB_lost', compute = 'from_particles', species='energetic_ions') + # self.add_scalar('en_tot',summands = ['en_U','en_p','en_B','en_fv','en_fB','en_fv_lost','en_fB_lost']) + self.add_scalar("en_tot", summands=["en_U", "en_p", "en_B", "en_fv", "en_fB"]) + self.add_scalar("n_lost_particles", compute="from_particles", species="energetic_ions") - self._PB = getattr(self.basis_ops, "PB") - self._PBb = self._PB.codomain.zeros() + # temporaries + self._b_full1 = self._b_eq.space.zeros() + self._PBb = self._absB0.space.zeros() - def update_scalar_quantities(self): - # scaling factor - Ab = self.mhd.mass_number - Ah = self.energetic_ions.var.species.mass_number + self._en_fv = np.empty(1, dtype=float) + self._en_fB = np.empty(1, dtype=float) + # self._en_fv_lost = np.empty(1, dtype=float) + # self._en_fB_lost = np.empty(1, dtype=float) + self._n_lost_particles = np.empty(1, dtype=float) - # perturbed fields - en_U = 0.5 * self.mass_ops.M2n.dot_inner( - self.mhd.velocity.spline.vector, - self.mhd.velocity.spline.vector, - ) - en_B = 0.5 * self.mass_ops.M2.dot_inner( - self.em_fields.b_field.spline.vector, - self.em_fields.b_field.spline.vector, - ) - en_p = 
self.mhd.pressure.spline.vector.inner(self._ones) / (5 / 3 - 1) + def update_scalar_quantities(self): + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_velocity"], self.pointer["mhd_velocity"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) + en_p = self.pointer["mhd_pressure"].inner(self._ones) / (5 / 3 - 1) self.update_scalar("en_U", en_U) - self.update_scalar("en_B", en_B) self.update_scalar("en_p", en_p) - - # particles' energy - particles = self.energetic_ions.var.particles + self.update_scalar("en_B", en_B) self._en_fv[0] = ( - particles.markers[~particles.holes, 5].dot( - particles.markers[~particles.holes, 3] ** 2, + self.pointer["energetic_ions"] + .markers[~self.pointer["energetic_ions"].holes, 5] + .dot( + self.pointer["energetic_ions"].markers[~self.pointer["energetic_ions"].holes, 3] ** 2, ) / (2.0) - * Ah - / Ab + * self._coupling_params["Ah"] + / self._coupling_params["Ab"] ) - self._PBb = self._PB.dot(self.em_fields.b_field.spline.vector) - particles.save_magnetic_energy(self._PBb) + self.update_scalar("en_fv", self._en_fv[0]) + + # self._en_fv_lost[0] = self.pointer['energetic_ions'].lost_markers[:self.pointer['energetic_ions'].n_lost_markers, 5].dot( + # self.pointer['energetic_ions'].lost_markers[:self.pointer['energetic_ions'].n_lost_markers, 3]**2) / (2.0) * self._coupling_params['Ah']/self._coupling_params['Ab'] + + # self.update_scalar('en_fv_lost', self._en_fv_lost[0]) + + # calculate particle magnetic energy + self.pointer["energetic_ions"].save_magnetic_energy( + self.pointer["b_field"], + ) self._en_fB[0] = ( - particles.markers[~particles.holes, 5].dot( - particles.markers[~particles.holes, 8], + self.pointer["energetic_ions"] + .markers[~self.pointer["energetic_ions"].holes, 5] + .dot( + self.pointer["energetic_ions"].markers[~self.pointer["energetic_ions"].holes, 8], ) - * Ah - / Ab + * self._coupling_params["Ah"] + / self._coupling_params["Ab"] ) - self.update_scalar("en_fv", self._en_fv[0]) self.update_scalar("en_fB", self._en_fB[0]) - self.update_scalar("en_tot") - # print number of lost particles - n_lost_markers = xp.array(particles.n_lost_markers) + # self._en_fB_lost[0] = self.pointer['energetic_ions'].lost_markers[:self.pointer['energetic_ions'].n_lost_markers, 5].dot( + # self.pointer['energetic_ions'] .lost_markers[:self.pointer['energetic_ions'].n_lost_markers, 8]) * self._coupling_params['Ah']/self._coupling_params['Ab'] - if self.derham.comm is not None: - self.derham.comm.Allreduce( - MPI.IN_PLACE, - n_lost_markers, - op=MPI.SUM, - ) + # self.update_scalar('en_fB_lost', self._en_fB_lost[0]) - if self.clone_config is not None: - self.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - n_lost_markers, - op=MPI.SUM, - ) + self.update_scalar("en_tot") - if rank == 0: + # Print number of lost ions + self._n_lost_particles[0] = self.pointer["energetic_ions"].n_lost_markers + self.update_scalar("n_lost_particles", self._n_lost_particles[0]) + if self.rank_world == 0: print( - "Lost particle ratio: ", - n_lost_markers / particles.Np * 100, - "% \n", + "ratio of lost particles: ", + self._n_lost_particles[0] / self.pointer["energetic_ions"].Np * 100, + "%", ) - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "shearalfen_cc5d.Options" in line: - new_file += [ - """model.propagators.shearalfen_cc5d.options 
= model.propagators.shearalfen_cc5d.Options( - energetic_ions = model.energetic_ions.var,)\n""", - ] - - elif "magnetosonic.Options" in line: - new_file += [ - """model.propagators.magnetosonic.options = model.propagators.magnetosonic.Options( - b_field=model.em_fields.b_field,)\n""", - ] - - elif "cc5d_density.Options" in line: - new_file += [ - """model.propagators.cc5d_density.options = model.propagators.cc5d_density.Options( - energetic_ions = model.energetic_ions.var, - b_tilde = model.em_fields.b_field,)\n""", - ] - - elif "cc5d_curlb.Options" in line: - new_file += [ - """model.propagators.cc5d_curlb.options = model.propagators.cc5d_curlb.Options( - b_tilde = model.em_fields.b_field,)\n""", - ] - - elif "cc5d_gradb.Options" in line: - new_file += [ - """model.propagators.cc5d_gradb.options = model.propagators.cc5d_gradb.Options( - b_tilde = model.em_fields.b_field,)\n""", - ] - - elif "push_bxe.Options" in line: - new_file += [ - """model.propagators.push_bxe.options = model.propagators.push_bxe.Options( - b_tilde = model.em_fields.b_field,)\n""", - ] - - elif "push_parallel.Options" in line: - new_file += [ - """model.propagators.push_parallel.options = model.propagators.push_parallel.Options( - b_tilde = model.em_fields.b_field,)\n""", - ] - - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) + @staticmethod + def diagnostics_dct(): + dct = {} + + dct["accumulated_magnetization"] = "Hdiv" + return dct + + __diagnostics__ = diagnostics_dct() class ColdPlasmaVlasov(StruphyModel): @@ -812,22 +960,27 @@ class ColdPlasmaVlasov(StruphyModel): &\frac{\partial \mathbf B}{\partial t} + \nabla\times\mathbf E = 0\,, \\[2mm] -&\frac{\partial \mathbf E}{\partial t} + \nabla\times\mathbf B = - \frac{\alpha^2}{\varepsilon_\textnormal{h}} \left( \mathbf j_\textnormal{c} + \int_{\mathbb{R}^3} \mathbf{v} f \, \text{d}^3 \mathbf{v} \right) \,, + \frac{\alpha^2}{\varepsilon_\textnormal{c}} \left( \mathbf j_\textnormal{c} + \nu \int_{\mathbb{R}^3} \mathbf{v} f \, \text{d}^3 \mathbf{v} \right) \,, where :math:`(n_0,\mathbf B_0)` denotes a (inhomogeneous) background and .. math:: - \alpha = \frac{\hat \Omega_\textnormal{p,cold}}{\hat \Omega_\textnormal{c,cold}}\,, \qquad \varepsilon_\textnormal{c} = \frac{1}{\hat \Omega_\textnormal{c,cold} \hat t}\,, \qquad \varepsilon_\textnormal{h} = \frac{1}{\hat \Omega_\textnormal{c,hot} \hat t} \,. + \alpha = \frac{\hat \Omega_\textnormal{p,cold}}{\hat \Omega_\textnormal{c,cold}}\,, \qquad \varepsilon_\textnormal{c} = \frac{1}{\hat \Omega_\textnormal{c,cold} \hat t}\,, \qquad \varepsilon_\textnormal{h} = \frac{1}{\hat \Omega_\textnormal{c,hot} \hat t} \,, \qquad \nu = \frac{Z_\textnormal{h}}{Z_\textnormal{c}}\,. At initial time the Poisson equation is solved once to weakly satisfy the Gauss law: .. math:: \begin{align} - \nabla \cdot \mathbf{E} & = \nu \frac{\alpha^2}{\varepsilon_\textnormal{h}} \int_{\mathbb{R}^3} f \, \text{d}^3 \mathbf{v}\,. + \nabla \cdot \mathbf{E} & = \nu \frac{\alpha^2}{\varepsilon_\textnormal{c}} \int_{\mathbb{R}^3} f \, \text{d}^3 \mathbf{v}\,. \end{align} + Note + ---------- + If hot and cold particles are of the same species (:math:`Z_\textnormal{c} = Z_\textnormal{h} \,, A_\textnormal{c} = A_\textnormal{h}`) then :math:`\varepsilon_\textnormal{c} = \varepsilon_\textnormal{h}` and :math:`\nu = 1`. + + :ref:`propagators` (called in sequence): 1. :class:`~struphy.propagators.propagators_fields.Maxwell` @@ -836,177 +989,203 @@ class ColdPlasmaVlasov(StruphyModel): 4. 
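The two particle scalars `en_fv` and `en_fB` above are plain weighted sums over marker-array columns. A minimal NumPy sketch of that bookkeeping, assuming the column layout visible in this diff (3: parallel velocity, 5: weight, 8: magnetic energy mu*|B|) and illustrative mass numbers; not the struphy `Particles` API:

```python
import numpy as np

rng = np.random.default_rng(0)
markers = rng.standard_normal((1000, 9))   # stand-in marker array
holes = np.zeros(1000, dtype=bool)
holes[::50] = True                         # empty slots, e.g. after particle loss

Ab, Ah = 1.0, 4.0                          # bulk / energetic-ion mass numbers (illustrative)
valid = markers[~holes]

# en_fv = Ah/Ab * sum_p w_p v_par,p^2 / 2   (parallel kinetic energy)
en_fv = valid[:, 5].dot(valid[:, 3] ** 2) / 2.0 * Ah / Ab
# en_fB = Ah/Ab * sum_p w_p mu_p |B|_p      (magnetic energy)
en_fB = valid[:, 5].dot(valid[:, 8]) * Ah / Ab
print(en_fv, en_fB)
```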


class ColdPlasmaVlasov(StruphyModel):
@@ -812,22 +960,27 @@ class ColdPlasmaVlasov(StruphyModel):
        &\frac{\partial \mathbf B}{\partial t} + \nabla\times\mathbf E = 0\,,
        \\[2mm]
        -&\frac{\partial \mathbf E}{\partial t} + \nabla\times\mathbf B =
-        \frac{\alpha^2}{\varepsilon_\textnormal{h}} \left( \mathbf j_\textnormal{c} + \int_{\mathbb{R}^3} \mathbf{v} f \, \text{d}^3 \mathbf{v} \right) \,,
+        \frac{\alpha^2}{\varepsilon_\textnormal{c}} \left( \mathbf j_\textnormal{c} + \nu \int_{\mathbb{R}^3} \mathbf{v} f \, \text{d}^3 \mathbf{v} \right) \,,

    where :math:`(n_0,\mathbf B_0)` denotes a (inhomogeneous) background and

    .. math::

-        \alpha = \frac{\hat \Omega_\textnormal{p,cold}}{\hat \Omega_\textnormal{c,cold}}\,, \qquad \varepsilon_\textnormal{c} = \frac{1}{\hat \Omega_\textnormal{c,cold} \hat t}\,, \qquad \varepsilon_\textnormal{h} = \frac{1}{\hat \Omega_\textnormal{c,hot} \hat t} \,.
+        \alpha = \frac{\hat \Omega_\textnormal{p,cold}}{\hat \Omega_\textnormal{c,cold}}\,, \qquad \varepsilon_\textnormal{c} = \frac{1}{\hat \Omega_\textnormal{c,cold} \hat t}\,, \qquad \varepsilon_\textnormal{h} = \frac{1}{\hat \Omega_\textnormal{c,hot} \hat t} \,, \qquad \nu = \frac{Z_\textnormal{h}}{Z_\textnormal{c}}\,.

    At initial time the Poisson equation is solved once to weakly satisfy the Gauss law:

    .. math::

        \begin{align}
-            \nabla \cdot \mathbf{E} & = \nu \frac{\alpha^2}{\varepsilon_\textnormal{h}} \int_{\mathbb{R}^3} f \, \text{d}^3 \mathbf{v}\,.
+            \nabla \cdot \mathbf{E} & = \nu \frac{\alpha^2}{\varepsilon_\textnormal{c}} \int_{\mathbb{R}^3} f \, \text{d}^3 \mathbf{v}\,.
        \end{align}

+    Note
+    ----------
+    If hot and cold particles are of the same species (:math:`Z_\textnormal{c} = Z_\textnormal{h} \,, A_\textnormal{c} = A_\textnormal{h}`) then :math:`\varepsilon_\textnormal{c} = \varepsilon_\textnormal{h}` and :math:`\nu = 1`.
+
+
    :ref:`propagators` (called in sequence):

    1. :class:`~struphy.propagators.propagators_fields.Maxwell`
@@ -836,177 +989,203 @@ class ColdPlasmaVlasov(StruphyModel):
    4. :class:`~struphy.propagators.propagators_markers.PushVxB`
    5. :class:`~struphy.propagators.propagators_markers.PushEta`
    6. :class:`~struphy.propagators.propagators_coupling.VlasovAmpere`
-    """
-
-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.e_field = FEECVariable(space="Hcurl")
-            self.b_field = FEECVariable(space="Hdiv")
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
-
-    class ThermalElectrons(FluidSpecies):
-        def __init__(self):
-            self.current = FEECVariable(space="Hcurl")
-            self.init_variables()
-
-    class HotElectrons(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="Particles6D")
-            self.init_variables()
-
-    ## propagators
-
-    class Propagators:
-        def __init__(self):
-            self.maxwell = propagators_fields.Maxwell()
-            self.ohm = propagators_fields.OhmCold()
-            self.jxb = propagators_fields.JxBCold()
-            self.push_eta = propagators_markers.PushEta()
-            self.push_vxb = propagators_markers.PushVxB()
-            self.coupling_va = propagators_coupling.VlasovAmpere()
+    :ref:`Model info `:
+    """

-    ## abstract methods
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}

-    def __init__(self):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
+        dct["em_fields"]["e_field"] = "Hcurl"
+        dct["em_fields"]["b_field"] = "Hdiv"
+        dct["fluid"]["cold_electrons"] = {"j": "Hcurl"}
+        dct["kinetic"]["hot_electrons"] = "Particles6D"
+        return dct

-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.thermal_elec = self.ThermalElectrons()
-        self.hot_elec = self.HotElectrons()
+    @staticmethod
+    def bulk_species():
+        return "cold_electrons"

-        # 2. instantiate all propagators
-        self.propagators = self.Propagators()
+    @staticmethod
+    def velocity_scale():
+        return "light"

-        # 3. assign variables to propagators
-        self.propagators.maxwell.variables.e = self.em_fields.e_field
-        self.propagators.maxwell.variables.b = self.em_fields.b_field
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_fields.Maxwell: ["e_field", "b_field"],
+            propagators_fields.OhmCold: ["cold_electrons_j", "e_field"],
+            propagators_fields.JxBCold: ["cold_electrons_j"],
+            propagators_markers.PushEta: ["hot_electrons"],
+            propagators_markers.PushVxB: ["hot_electrons"],
+            propagators_coupling.VlasovAmpere: ["e_field", "hot_electrons"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    # add special options
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["em_fields"],
+            option=propagators_fields.ImplicitDiffusion,
+            dct=dct,
+        )
+        return dct

-        self.propagators.ohm.variables.j = self.thermal_elec.current
-        self.propagators.ohm.variables.e = self.em_fields.e_field
+    def __init__(self, params, comm, clone_config=None):
+        # initialize base class
+        super().__init__(params, comm=comm, clone_config=clone_config)

-        self.propagators.jxb.variables.j = self.thermal_elec.current
+        # Get rank
+        self._rank = self.rank_world

-        self.propagators.push_eta.variables.var = self.hot_elec.var
-        self.propagators.push_vxb.variables.ions = self.hot_elec.var
+        # prelim
+        hot_params = params["kinetic"]["hot_electrons"]

-        self.propagators.coupling_va.variables.e = self.em_fields.e_field
-        self.propagators.coupling_va.variables.ions = self.hot_elec.var
+        # model parameters
+        self._alpha = np.abs(
+            self.equation_params["cold_electrons"]["alpha"],
+        )
+        self._epsilon_cold = self.equation_params["cold_electrons"]["epsilon"]
+        self._epsilon_hot = self.equation_params["hot_electrons"]["epsilon"]
+
+        self._nu = hot_params["phys_params"]["Z"] / params["fluid"]["cold_electrons"]["phys_params"]["Z"]
+
+        # Initialize background magnetic field from MHD equilibrium
+        self._b_background = self.derham.P["2"](
+            [
+                self.equil.b2_1,
+                self.equil.b2_2,
+                self.equil.b2_3,
+            ]
+        )

-        # define scalars for update_scalar_quantities
+        # propagator parameters
+        params_maxwell = params["em_fields"]["options"]["Maxwell"]["solver"]
+        params_ohmcold = params["fluid"]["cold_electrons"]["options"]["OhmCold"]["solver"]
+        params_jxbcold = params["fluid"]["cold_electrons"]["options"]["JxBCold"]["solver"]
+        algo_eta = params["kinetic"]["hot_electrons"]["options"]["PushEta"]["algo"]
+        algo_vxb = params["kinetic"]["hot_electrons"]["options"]["PushVxB"]["algo"]
+        params_coupling = params["em_fields"]["options"]["VlasovAmpere"]["solver"]
+        self._poisson_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"]
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_fields.Maxwell] = {"solver": params_maxwell}
+
+        self._kwargs[propagators_fields.OhmCold] = {
+            "alpha": self._alpha,
+            "epsilon": self._epsilon_cold,
+            "solver": params_ohmcold,
+        }
+
+        self._kwargs[propagators_fields.JxBCold] = {
+            "epsilon": self._epsilon_cold,
+            "solver": params_jxbcold,
+        }
+
+        self._kwargs[propagators_markers.PushEta] = {"algo": algo_eta}
+
+        self._kwargs[propagators_markers.PushVxB] = {
+            "algo": algo_vxb,
+            "kappa": 1.0 / self._epsilon_cold,
+            "b2": self.pointer["b_field"],
+            "b2_add": self._b_background,
+        }
+
+        self._kwargs[propagators_coupling.VlasovAmpere] = {
+            "c1": self._nu * self._alpha**2 / self._epsilon_cold,
+            "c2": 1.0 / self._epsilon_hot,
+            "solver": params_coupling,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
+
+        # Scalar variables to be saved during simulation
        self.add_scalar("en_E")
        self.add_scalar("en_B")
        self.add_scalar("en_J")
-        self.add_scalar("en_f", compute="from_particles", variable=self.hot_elec.var)
+        self.add_scalar("en_f", compute="from_particles", species="hot_electrons")
        self.add_scalar("en_tot")

-        # initial Poisson (not a propagator used in time stepping)
-        self.initial_poisson = propagators_fields.Poisson()
-        self.initial_poisson.variables.phi = self.em_fields.phi
-
-    @property
-    def bulk_species(self):
-        return self.thermal_elec
-
-    @property
-    def velocity_scale(self):
-        return "light"
-
-    def allocate_helpers(self):
-        self._tmp = xp.empty(1, dtype=float)
-
-    def update_scalar_quantities(self):
-        # e*M1*e/2
-        e = self.em_fields.e_field.spline.vector
-        en_E = 0.5 * self.mass_ops.M1.dot_inner(e, e)
-        self.update_scalar("en_E", en_E)
-
-        # alpha^2 / 2 / N * sum_p w_p v_p^2
-        particles = self.hot_elec.var.particles
-        alpha = self.hot_elec.equation_params.alpha
-        self._tmp[0] = (
-            alpha**2
-            / (2 * particles.Np)
-            * xp.dot(
-                particles.markers_wo_holes[:, 3] ** 2
-                + particles.markers_wo_holes[:, 4] ** 2
-                + particles.markers_wo_holes[:, 5] ** 2,
-                particles.markers_wo_holes[:, 6],
-            )
-        )
-        self.update_scalar("en_f", self._tmp[0])
-
-        # en_tot = en_w + en_e
-        self.update_scalar("en_tot", en_E + self._tmp[0])
-
-    def allocate_propagators(self):
-        """Solve initial Poisson equation.
-
-        :meta private:
-        """
+        # temporaries
+        self._tmp = np.empty(1, dtype=float)

-        # initialize fields and particles
-        super().allocate_propagators()
+    def initialize_from_params(self):
+        """:meta private:"""
+        from psydac.linalg.stencil import StencilVector

-        if MPI.COMM_WORLD.Get_rank() == 0:
-            print("\nINITIAL POISSON SOLVE:")
+        from struphy.pic.accumulation.particles_to_grid import AccumulatorVector

-        # use control variate method
-        particles = self.hot_elec.var.particles
-        particles.update_weights()
+        # Initialize fields and particles
+        super().initialize_from_params()

-        # sanity check
-        # self.pointer['species1'].show_distribution_function(
-        #     [True] + [False]*5, [xp.linspace(0, 1, 32)])
-
-        # accumulate charge density
+        # Accumulate charge density
        charge_accum = AccumulatorVector(
-            particles,
+            self.pointer["hot_electrons"],
            "H1",
-            Pyccelkernel(accum_kernels.charge_density_0form),
+            Pyccelkernel(accum_kernels.vlasov_maxwell_poisson),
            self.mass_ops,
            self.domain.args_domain,
        )
+        charge_accum()

-        # another sanity check: compute FE coeffs of density
-        # charge_accum.show_accumulated_spline_field(self.mass_ops)
-
-        alpha = self.hot_elec.equation_params.alpha
-        epsilon = self.hot_elec.equation_params.epsilon
+        # Locally subtract mean charge for solvability with periodic bc
+        if np.all(charge_accum.vectors[0].space.periods):
+            charge_accum._vectors[0][:] -= np.mean(
+                charge_accum.vectors[0].toarray()[charge_accum.vectors[0].toarray() != 0],
+            )

-        self.initial_poisson.options.rho = charge_accum
-        self.initial_poisson.options.rho_coeffs = alpha**2 / epsilon
-        self.initial_poisson.allocate()
+        # Instantiate Poisson solver
+        _phi = StencilVector(self.derham.Vh["0"])
+        poisson_solver = propagators_fields.ImplicitDiffusion(
+            _phi,
+            sigma_1=0,
+            rho=self._nu * self._alpha**2 / self._epsilon_cold * charge_accum.vectors[0],
+            x0=self._nu * self._alpha**2 / self._epsilon_cold * charge_accum.vectors[0],
+            solver=self._poisson_params,
+        )

        # Solve with dt=1. and compute electric field
-        if MPI.COMM_WORLD.Get_rank() == 0:
-            print("\nSolving initial Poisson problem...")
-        self.initial_poisson(1.0)
-
-        phi = self.initial_poisson.variables.phi.spline.vector
-        self.derham.grad.dot(-phi, out=self.em_fields.e_field.spline.vector)
-        if MPI.COMM_WORLD.Get_rank() == 0:
-            print("Done.")
-
-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "coupling_va.Options" in line:
-                    new_file += [line]
-                    new_file += ["model.initial_poisson.options = model.initial_poisson.Options()\n"]
-                elif "set_save_data" in line:
-                    new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"]
-                    new_file += ["model.hot_elec.set_save_data(binning_plots=(binplot,))\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
+        poisson_solver(1.0)
+        self.derham.grad.dot(-_phi, out=self.pointer["e_field"])
+
+    def update_scalar_quantities(self):
+        en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"])
+        en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"])
+        en_J = (
+            0.5
+            * self._alpha**2
+            * self.mass_ops.M1ninv.dot_inner(self.pointer["cold_electrons_j"], self.pointer["cold_electrons_j"])
+        )
+        self.update_scalar("en_E", en_E)
+        self.update_scalar("en_B", en_B)
+        self.update_scalar("en_J", en_J)
+
+        # nu alpha^2 eps_h / eps_c / 2 / N * sum_p w_p v_p^2
+        self._tmp[0] = (
+            self._nu
+            * self._alpha**2
+            * self._epsilon_hot
+            / self._epsilon_cold
+            / (2 * self.pointer["hot_electrons"].Np)
+            * np.dot(
+                self.pointer["hot_electrons"].markers_wo_holes[:, 3] ** 2
+                + self.pointer["hot_electrons"].markers_wo_holes[:, 4] ** 2
+                + self.pointer["hot_electrons"].markers_wo_holes[:, 5] ** 2,
+                self.pointer["hot_electrons"].markers_wo_holes[:, 6],
+            )
+        )
+
+        self.update_scalar("en_f", self._tmp[0])
+
+        # en_tot = en_E + en_B + en_J + en_w
+        self.update_scalar("en_tot", en_E + en_B + en_J + self._tmp[0])
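The Note in the ColdPlasmaVlasov docstring is easy to check numerically: with the normalization given there, the constants handed to `VlasovAmpere` are `c1 = nu * alpha**2 / eps_cold` and `c2 = 1 / eps_hot`. A small sketch with illustrative values (not struphy defaults):

```python
# Illustrative values only; Z in units of the elementary charge.
Z_cold, Z_hot = -1.0, -1.0            # cold and hot electrons, i.e. same species
alpha, eps_cold, eps_hot = 1.0, 0.1, 0.1

nu = Z_hot / Z_cold                   # charge ratio from the docstring
c1 = nu * alpha**2 / eps_cold         # weight of the particle current in Ampere's law
c2 = 1.0 / eps_hot                    # weight of the electric push on the markers
print(nu, c1, c2)                     # nu == 1 when hot and cold species coincide
```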
diff --git a/src/struphy/models/kinetic.py b/src/struphy/models/kinetic.py
index 3f327e9b2..dbf5f523e 100644
--- a/src/struphy/models/kinetic.py
+++ b/src/struphy/models/kinetic.py
@@ -1,19 +1,10 @@
-import cunumpy as xp
-from psydac.ddm.mpi import mpi as MPI
-
-from struphy.feec.projectors import L2Projector
 from struphy.kinetic_background.base import KineticBackground
-from struphy.kinetic_background.maxwellians import Maxwellian3D
 from struphy.models.base import StruphyModel
-from struphy.models.species import FieldSpecies, FluidSpecies, ParticleSpecies
-from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable
 from struphy.pic.accumulation import accum_kernels, accum_kernels_gc
-from struphy.pic.accumulation.particles_to_grid import AccumulatorVector
 from struphy.propagators import propagators_coupling, propagators_fields, propagators_markers
+from struphy.utils.arrays import xp as np
 from struphy.utils.pyccel import Pyccelkernel

-rank = MPI.COMM_WORLD.Get_rank()
-

class VlasovAmpereOneSpecies(StruphyModel):
    r"""Vlasov-Ampère equations for one species.
@@ -82,166 +73,202 @@ class VlasovAmpereOneSpecies(StruphyModel):

    1. :class:`~struphy.propagators.propagators_markers.PushEta`
    2. :class:`~struphy.propagators.propagators_coupling.VlasovAmpere`
    3. :class:`~struphy.propagators.propagators_markers.PushVxB`
-    """
-
-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.e_field = FEECVariable(space="Hcurl")
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
-
-    class KineticIons(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="Particles6D")
-            self.init_variables()
-
-    ## propagators
-
-    class Propagators:
-        def __init__(self, with_B0: bool = True):
-            self.push_eta = propagators_markers.PushEta()
-            if with_B0:
-                self.push_vxb = propagators_markers.PushVxB()
-            self.coupling_va = propagators_coupling.VlasovAmpere()
+
+    :ref:`Model info `:
+    """

-    ## abstract methods
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}

-    def __init__(self, with_B0: bool = True):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
+        dct["em_fields"]["e_field"] = "Hcurl"
+        dct["kinetic"]["species1"] = "Particles6D"
+        return dct

-        self.with_B0 = with_B0
+    @staticmethod
+    def bulk_species():
+        return "species1"

-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.kinetic_ions = self.KineticIons()
+    @staticmethod
+    def velocity_scale():
+        return "light"

-        # 2. instantiate all propagators
-        self.propagators = self.Propagators(with_B0=with_B0)
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_markers.PushEta: ["species1"],
+            propagators_markers.PushVxB: ["species1"],
+            propagators_coupling.VlasovAmpere: ["e_field", "species1"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    # add special options
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["em_fields"],
+            option=propagators_fields.ImplicitDiffusion,
+            dct=dct,
+        )
+        cls.add_option(
+            species=["kinetic", "species1"],
+            key="override_eq_params",
+            option=[False, {"alpha": 1.0, "epsilon": -1.0}],
+            dct=dct,
+        )
+        return dct

-        # 3. assign variables to propagators
-        self.propagators.push_eta.variables.var = self.kinetic_ions.var
-        if with_B0:
-            self.propagators.push_vxb.variables.ions = self.kinetic_ions.var
-        self.propagators.coupling_va.variables.e = self.em_fields.e_field
-        self.propagators.coupling_va.variables.ions = self.kinetic_ions.var
+    def __init__(self, params, comm, clone_config=None):
+        # initialize base class
+        super().__init__(params, comm=comm, clone_config=clone_config)

-        # define scalars for update_scalar_quantities
+        # get species parameters
+        species1_params = params["kinetic"]["species1"]
+
+        # Get coupling strength
+        if species1_params["options"]["override_eq_params"]:
+            self._alpha = species1_params["options"]["override_eq_params"]["alpha"]
+            self._epsilon = species1_params["options"]["override_eq_params"]["epsilon"]
+            print(
+                f"\n!!! Override equation parameters: {self._alpha = } and {self._epsilon = }.",
+            )
+        else:
+            self._alpha = self.equation_params["species1"]["alpha"]
+            self._epsilon = self.equation_params["species1"]["epsilon"]
+
+        # Check if it is control-variate method
+        self._control_variate = species1_params["markers"]["control_variate"]
+
+        # check mean velocity
+        # TODO: assert f0.params[] == 0.
+
+        # Initialize background magnetic field from MHD equilibrium
+        if self.projected_equil:
+            self._b_background = self.projected_equil.b2
+        else:
+            self._b_background = None
+
+        # propagator parameters
+        self._poisson_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"]
+        algo_eta = params["kinetic"]["species1"]["options"]["PushEta"]["algo"]
+        if self._b_background is not None:
+            algo_vxb = params["kinetic"]["species1"]["options"]["PushVxB"]["algo"]
+        params_coupling = params["em_fields"]["options"]["VlasovAmpere"]["solver"]
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_markers.PushEta] = {
+            "algo": algo_eta,
+        }
+
+        # Only add PushVxB if magnetic field is not zero
+        self._kwargs[propagators_markers.PushVxB] = None
+        if self._b_background is not None:
+            self._kwargs[propagators_markers.PushVxB] = {
+                "algo": algo_vxb,
+                "b2": self._b_background,
+                "kappa": 1.0 / self._epsilon,
+            }
+
+        self._kwargs[propagators_coupling.VlasovAmpere] = {
+            "c1": self._alpha**2 / self._epsilon,
+            "c2": 1.0 / self._epsilon,
+            "solver": params_coupling,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
+
+        # Scalar variables to be saved during the simulation
        self.add_scalar("en_E")
-        self.add_scalar("en_f", compute="from_particles", variable=self.kinetic_ions.var)
+        self.add_scalar("en_f", compute="from_particles", species="species1")
        self.add_scalar("en_tot")

-        # initial Poisson (not a propagator used in time stepping)
-        self.initial_poisson = propagators_fields.Poisson()
-        self.initial_poisson.variables.phi = self.em_fields.phi
-
-    @property
-    def bulk_species(self):
-        return self.kinetic_ions
-
-    @property
-    def velocity_scale(self):
-        return "light"
-
-    def allocate_helpers(self):
-        self._tmp = xp.empty(1, dtype=float)
-
-    def update_scalar_quantities(self):
-        # e*M1*e/2
-        e = self.em_fields.e_field.spline.vector
-        en_E = 0.5 * self.mass_ops.M1.dot_inner(e, e)
-        self.update_scalar("en_E", en_E)
-
-        # alpha^2 / 2 / N * sum_p w_p v_p^2
-        particles = self.kinetic_ions.var.particles
-        alpha = self.kinetic_ions.equation_params.alpha
-        self._tmp[0] = (
-            alpha**2
-            / (2 * particles.Np)
-            * xp.dot(
-                particles.markers_wo_holes[:, 3] ** 2
-                + particles.markers_wo_holes[:, 4] ** 2
-                + particles.markers_wo_holes[:, 5] ** 2,
-                particles.markers_wo_holes[:, 6],
-            )
-        )
-        self.update_scalar("en_f", self._tmp[0])
-
-        # en_tot = en_w + en_e
-        self.update_scalar("en_tot", en_E + self._tmp[0])
+        # temporaries
+        self._tmp = np.empty(1, dtype=float)

-    def allocate_propagators(self):
+    def initialize_from_params(self):
        """Solve initial Poisson equation.

        :meta private:
        """
+        from struphy.pic.accumulation.particles_to_grid import AccumulatorVector
+
        # initialize fields and particles
-        super().allocate_propagators()
+        super().initialize_from_params()

-        if MPI.COMM_WORLD.Get_rank() == 0:
+        if self.rank_world == 0:
            print("\nINITIAL POISSON SOLVE:")

        # use control variate method
-        particles = self.kinetic_ions.var.particles
-        particles.update_weights()
+        self.pointer["species1"].update_weights()

        # sanity check
        # self.pointer['species1'].show_distribution_function(
-        #     [True] + [False]*5, [xp.linspace(0, 1, 32)])
+        #     [True] + [False]*5, [np.linspace(0, 1, 32)])

        # accumulate charge density
        charge_accum = AccumulatorVector(
-            particles,
+            self.pointer["species1"],
            "H1",
            Pyccelkernel(accum_kernels.charge_density_0form),
            self.mass_ops,
            self.domain.args_domain,
        )

+        charge_accum(self.pointer["species1"].vdim)
+
        # another sanity check: compute FE coeffs of density
        # charge_accum.show_accumulated_spline_field(self.mass_ops)

-        alpha = self.kinetic_ions.equation_params.alpha
-        epsilon = self.kinetic_ions.equation_params.epsilon
-
-        self.initial_poisson.options.rho = charge_accum
-        self.initial_poisson.options.rho_coeffs = alpha**2 / epsilon
-        self.initial_poisson.allocate()
+        # Instantiate Poisson solver
+        _phi = self.derham.Vh["0"].zeros()
+        poisson_solver = propagators_fields.ImplicitDiffusion(
+            _phi,
+            sigma_1=0.0,
+            sigma_2=0.0,
+            sigma_3=1.0,
+            rho=self._alpha**2 / self._epsilon * charge_accum.vectors[0],
+            solver=self._poisson_params,
+        )

        # Solve with dt=1. and compute electric field
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        if self.rank_world == 0:
            print("\nSolving initial Poisson problem...")
-        self.initial_poisson(1.0)
+        poisson_solver(1.0)

-        phi = self.initial_poisson.variables.phi.spline.vector
-        self.derham.grad.dot(-phi, out=self.em_fields.e_field.spline.vector)
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        self.derham.grad.dot(-_phi, out=self.pointer["e_field"])
+        if self.rank_world == 0:
            print("Done.")

-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "coupling_va.Options" in line:
-                    new_file += [line]
-                    new_file += ["model.initial_poisson.options = model.initial_poisson.Options()\n"]
-                elif "push_vxb.Options" in line:
-                    new_file += ["if model.with_B0:\n"]
-                    new_file += ["    " + line]
-                elif "set_save_data" in line:
-                    new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"]
-                    new_file += ["model.kinetic_ions.set_save_data(binning_plots=(binplot,))\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
+    def update_scalar_quantities(self):
+        # e*M1*e/2
+        en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"])
+        self.update_scalar("en_E", en_E)
+
+        # alpha^2 / 2 / N * sum_p w_p v_p^2
+        self._tmp[0] = (
+            self._alpha**2
+            / (2 * self.pointer["species1"].Np)
+            * np.dot(
+                self.pointer["species1"].markers_wo_holes[:, 3] ** 2
+                + self.pointer["species1"].markers_wo_holes[:, 4] ** 2
+                + self.pointer["species1"].markers_wo_holes[:, 5] ** 2,
+                self.pointer["species1"].markers_wo_holes[:, 6],
+            )
+        )
+
+        self.update_scalar("en_f", self._tmp[0])
+
+        # en_tot = en_w + en_e
+        self.update_scalar("en_tot", en_E + self._tmp[0])
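The initialization used here (and in the sibling models below) follows one pattern: accumulate the particle charge into `rho`, solve a single Poisson problem via `ImplicitDiffusion` with `dt=1`, then set `E = -grad(phi)`. A schematic 1D stand-in using a spectral solve instead of the struphy FEEC machinery, for orientation only:

```python
# Schematic: rho -> phi -> E = -grad(phi). Periodic 1D grid, zero-mean gauge.
import numpy as np

n, L = 64, 1.0
x = np.linspace(0, L, n, endpoint=False)
rho = np.cos(2 * np.pi * x)                 # mean-free charge density
alpha, eps = 1.0, 0.1                       # illustrative equation parameters

rho_hat = np.fft.rfft(alpha**2 / eps * rho)
k = 2 * np.pi * np.fft.rfftfreq(n, d=L / n)
phi_hat = np.zeros_like(rho_hat)
phi_hat[1:] = rho_hat[1:] / k[1:] ** 2      # solve -phi'' = (alpha^2/eps) rho
phi = np.fft.irfft(phi_hat, n)
E = -np.gradient(phi, L / n)                # E = -grad(phi)
```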


class VlasovMaxwellOneSpecies(StruphyModel):
@@ -323,171 +350,197 @@ class VlasovMaxwellOneSpecies(StruphyModel):
    :ref:`Model info `:
    """

-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.e_field = FEECVariable(space="Hcurl")
-            self.b_field = FEECVariable(space="Hdiv")
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}

-    class KineticIons(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="Particles6D")
-            self.init_variables()
+        dct["em_fields"]["e_field"] = "Hcurl"
+        dct["em_fields"]["b_field"] = "Hdiv"
+        dct["kinetic"]["species1"] = "Particles6D"
+        return dct

-    ## propagators
+    @staticmethod
+    def bulk_species():
+        return "species1"

-    class Propagators:
-        def __init__(self):
-            self.maxwell = propagators_fields.Maxwell()
-            self.push_eta = propagators_markers.PushEta()
-            self.push_vxb = propagators_markers.PushVxB()
-            self.coupling_va = propagators_coupling.VlasovAmpere()
-
-    ## abstract methods
-
-    def __init__(self):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
+    @staticmethod
+    def velocity_scale():
+        return "light"

-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.kinetic_ions = self.KineticIons()
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_fields.Maxwell: ["e_field", "b_field"],
+            propagators_markers.PushEta: ["species1"],
+            propagators_markers.PushVxB: ["species1"],
+            propagators_coupling.VlasovAmpere: ["e_field", "species1"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    # add special options
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["em_fields"],
+            option=propagators_fields.ImplicitDiffusion,
+            dct=dct,
+        )
+        cls.add_option(
+            species=["kinetic", "species1"],
+            key="override_eq_params",
+            option=[False, {"alpha": 1.0, "epsilon": -1.0}],
+            dct=dct,
+        )
+        return dct

-        # 2. instantiate all propagators
-        self.propagators = self.Propagators()
+    def __init__(self, params, comm, clone_config=None):
+        # initialize base class
+        super().__init__(params, comm=comm, clone_config=clone_config)

-        # 3. assign variables to propagators
-        self.propagators.maxwell.variables.e = self.em_fields.e_field
-        self.propagators.maxwell.variables.b = self.em_fields.b_field
-        self.propagators.push_eta.variables.var = self.kinetic_ions.var
-        self.propagators.push_vxb.variables.ions = self.kinetic_ions.var
-        self.propagators.coupling_va.variables.e = self.em_fields.e_field
-        self.propagators.coupling_va.variables.ions = self.kinetic_ions.var
+        # get species parameters
+        species1_params = params["kinetic"]["species1"]

-        # define scalars for update_scalar_quantities
+        # equation parameters
+        if species1_params["options"]["override_eq_params"]:
+            self._alpha = species1_params["options"]["override_eq_params"]["alpha"]
+            self._epsilon = species1_params["options"]["override_eq_params"]["epsilon"]
+            print(
+                f"\n!!! Override equation parameters: {self._alpha = } and {self._epsilon = }.",
+            )
+        else:
+            self._alpha = self.equation_params["species1"]["alpha"]
+            self._epsilon = self.equation_params["species1"]["epsilon"]
+
+        # set background density and mean velocity factors
+        self.pointer["species1"].f0.moment_factors["u"] = [
+            self._epsilon / self._alpha**2,
+        ] * 3
+
+        # Initialize background magnetic field from MHD equilibrium
+        if self.projected_equil:
+            self._b_background = self.projected_equil.b2
+        else:
+            self._b_background = None
+
+        # propagator parameters
+        params_maxwell = params["em_fields"]["options"]["Maxwell"]["solver"]
+        algo_eta = params["kinetic"]["species1"]["options"]["PushEta"]["algo"]
+        algo_vxb = params["kinetic"]["species1"]["options"]["PushVxB"]["algo"]
+        params_coupling = params["em_fields"]["options"]["VlasovAmpere"]["solver"]
+        self._poisson_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"]
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_fields.Maxwell] = {"solver": params_maxwell}
+
+        self._kwargs[propagators_markers.PushEta] = {"algo": algo_eta}
+
+        self._kwargs[propagators_markers.PushVxB] = {
+            "algo": algo_vxb,
+            "kappa": 1.0 / self._epsilon,
+            "b2": self.pointer["b_field"],
+            "b2_add": self._b_background,
+        }
+
+        self._kwargs[propagators_coupling.VlasovAmpere] = {
+            "c1": self._alpha**2 / self._epsilon,
+            "c2": 1.0 / self._epsilon,
+            "solver": params_coupling,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
+
+        # Scalar variables to be saved during the simulation
        self.add_scalar("en_E")
        self.add_scalar("en_B")
-        self.add_scalar("en_f", compute="from_particles", variable=self.kinetic_ions.var)
+        self.add_scalar("en_f", compute="from_particles", species="species1")
        self.add_scalar("en_tot")

-        # initial Poisson (not a propagator used in time stepping)
-        self.initial_poisson = propagators_fields.Poisson()
-        self.initial_poisson.variables.phi = self.em_fields.phi
-
-    @property
-    def bulk_species(self):
-        return self.kinetic_ions
-
-    @property
-    def velocity_scale(self):
-        return "light"
-
-    def allocate_helpers(self):
-        self._tmp = xp.empty(1, dtype=float)
-
-    def update_scalar_quantities(self):
-        # e*M1*e/2
-        e = self.em_fields.e_field.spline.vector
-        b = self.em_fields.b_field.spline.vector
-
-        en_E = 0.5 * self.mass_ops.M1.dot_inner(e, e)
-        self.update_scalar("en_E", en_E)
-
-        en_B = 0.5 * self.mass_ops.M2.dot_inner(b, b)
-        self.update_scalar("en_B", en_B)
-
-        # alpha^2 / 2 / N * sum_p w_p v_p^2
-        particles = self.kinetic_ions.var.particles
-        alpha = self.kinetic_ions.equation_params.alpha
-        self._tmp[0] = (
-            alpha**2
-            / (2 * particles.Np)
-            * xp.dot(
-                particles.markers_wo_holes[:, 3] ** 2
-                + particles.markers_wo_holes[:, 4] ** 2
-                + particles.markers_wo_holes[:, 5] ** 2,
-                particles.markers_wo_holes[:, 6],
-            )
-        )
-        self.update_scalar("en_f", self._tmp[0])
-
-        # en_tot = en_w + en_e
-        self.update_scalar("en_tot", en_E + self._tmp[0])
+        # temporaries
+        self._tmp = np.empty(1, dtype=float)

-    def allocate_propagators(self):
-        """Solve initial Poisson equation.
+    def initialize_from_params(self):
+        """:meta private:"""

-        :meta private:
-        """
+        from struphy.pic.accumulation.particles_to_grid import AccumulatorVector

        # initialize fields and particles
-        super().allocate_propagators()
+        super().initialize_from_params()

-        if MPI.COMM_WORLD.Get_rank() == 0:
+        if self.rank_world == 0:
            print("\nINITIAL POISSON SOLVE:")

        # use control variate method
-        particles = self.kinetic_ions.var.particles
-        particles.update_weights()
+        self.pointer["species1"].update_weights()

        # sanity check
        # self.pointer['species1'].show_distribution_function(
-        #     [True] + [False]*5, [xp.linspace(0, 1, 32)])
+        #     [True] + [False]*5, [np.linspace(0, 1, 32)])

        # accumulate charge density
        charge_accum = AccumulatorVector(
-            particles,
+            self.pointer["species1"],
            "H1",
            Pyccelkernel(accum_kernels.charge_density_0form),
            self.mass_ops,
            self.domain.args_domain,
        )

+        charge_accum(self.pointer["species1"].vdim)
+
        # another sanity check: compute FE coeffs of density
        # charge_accum.show_accumulated_spline_field(self.mass_ops)

-        alpha = self.kinetic_ions.equation_params.alpha
-        epsilon = self.kinetic_ions.equation_params.epsilon
-
-        self.initial_poisson.options.rho = charge_accum
-        self.initial_poisson.options.rho_coeffs = alpha**2 / epsilon
-        self.initial_poisson.allocate()
+        # Instantiate Poisson solver
+        _phi = self.derham.Vh["0"].zeros()
+        poisson_solver = propagators_fields.ImplicitDiffusion(
+            _phi,
+            sigma_1=0.0,
+            sigma_2=0.0,
+            sigma_3=1.0,
+            rho=self._alpha**2 / self._epsilon * charge_accum.vectors[0],
+            solver=self._poisson_params,
+        )

        # Solve with dt=1. and compute electric field
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        if self.rank_world == 0:
            print("\nSolving initial Poisson problem...")
-        self.initial_poisson(1.0)
+        poisson_solver(1.0)

-        phi = self.initial_poisson.variables.phi.spline.vector
-        self.derham.grad.dot(-phi, out=self.em_fields.e_field.spline.vector)
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        self.derham.grad.dot(-_phi, out=self.pointer["e_field"])
+        if self.rank_world == 0:
            print("Done.")

-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "coupling_va.Options" in line:
-                    new_file += [line]
-                    new_file += ["model.initial_poisson.options = model.initial_poisson.Options()\n"]
-                elif "push_vxb.Options" in line:
-                    new_file += [
-                        "model.propagators.push_vxb.options = model.propagators.push_vxb.Options(b2_var=model.em_fields.b_field)\n",
-                    ]
-                elif "set_save_data" in line:
-                    new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"]
-                    new_file += ["model.kinetic_ions.set_save_data(binning_plots=(binplot,))\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
+    def update_scalar_quantities(self):
+        # e*M1*e and b*M2*b
+        en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"])
+        en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"])
+        self.update_scalar("en_E", en_E)
+        self.update_scalar("en_B", en_B)
+
+        # alpha^2 / 2 / N * sum_p w_p v_p^2
+        self._tmp[0] = (
+            self._alpha**2
+            / (2 * self.pointer["species1"].Np)
+            * np.dot(
+                self.pointer["species1"].markers_wo_holes[:, 3] ** 2
+                + self.pointer["species1"].markers_wo_holes[:, 4] ** 2
+                + self.pointer["species1"].markers_wo_holes[:, 5] ** 2,
+                self.pointer["species1"].markers_wo_holes[:, 6],
+            )
+        )
+
+        self.update_scalar("en_f", self._tmp[0])
+
+        # en_tot = en_w + en_e + en_b
+        self.update_scalar("en_tot", en_E + en_B + self._tmp[0])
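The particle energy `en_f` above is `alpha**2 / (2N) * sum_p w_p |v_p|**2`, read off marker columns 3:6 (velocity components) and 6 (weight). A self-contained NumPy check with synthetic markers, assuming that column layout:

```python
import numpy as np

rng = np.random.default_rng(1)
markers = rng.standard_normal((500, 7))   # stand-in for markers_wo_holes
alpha, Np = 1.0, 500                      # illustrative values

v2 = (markers[:, 3:6] ** 2).sum(axis=1)   # |v_p|^2 per marker
en_f = alpha**2 / (2 * Np) * np.dot(v2, markers[:, 6])
print(en_f)
```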


class LinearVlasovAmpereOneSpecies(StruphyModel):
@@ -557,188 +610,245 @@ class LinearVlasovAmpereOneSpecies(StruphyModel):
    :ref:`Model info `:
    """

-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.e_field = FEECVariable(space="Hcurl")
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
-
-    class KineticIons(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="DeltaFParticles6D")
-            self.init_variables()
-
-    ## propagators
-
-    class Propagators:
-        def __init__(
-            self,
-            with_B0: bool = True,
-            with_E0: bool = True,
-        ):
-            self.push_eta = propagators_markers.PushEta()
-            if with_E0:
-                self.push_vinE = propagators_markers.PushVinEfield()
-            self.coupling_Eweights = propagators_coupling.EfieldWeights()
-            if with_B0:
-                self.push_vxb = propagators_markers.PushVxB()
-
-    ## abstract methods
-
-    def __init__(
-        self,
-        with_B0: bool = True,
-        with_E0: bool = True,
-    ):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
-
-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.kinetic_ions = self.KineticIons()
-
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators(with_B0=with_B0, with_E0=with_E0)
-
-        # 3. assign variables to propagators
-        self.propagators.push_eta.variables.var = self.kinetic_ions.var
-        if with_E0:
-            self.propagators.push_vinE.variables.var = self.kinetic_ions.var
-        self.propagators.coupling_Eweights.variables.e = self.em_fields.e_field
-        self.propagators.coupling_Eweights.variables.ions = self.kinetic_ions.var
-        if with_B0:
-            self.propagators.push_vxb.variables.ions = self.kinetic_ions.var
-
-        # define scalars for update_scalar_quantities
-        self.add_scalar("en_E")
-        self.add_scalar("en_w", compute="from_particles", variable=self.kinetic_ions.var)
-        self.add_scalar("en_tot")
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}

-        # initial Poisson (not a propagator used in time stepping)
-        self.initial_poisson = propagators_fields.Poisson()
-        self.initial_poisson.variables.phi = self.em_fields.phi
+        dct["em_fields"]["e_field"] = "Hcurl"
+        dct["kinetic"]["species1"] = "DeltaFParticles6D"
+        return dct

-    @property
-    def bulk_species(self):
-        return self.kinetic_ions
+    @staticmethod
+    def bulk_species():
+        return "species1"

-    @property
-    def velocity_scale(self):
+    @staticmethod
+    def velocity_scale():
        return "light"

-    def allocate_helpers(self):
-        self._tmp = xp.empty(1, dtype=float)
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_markers.PushEta: ["species1"],
+            propagators_markers.PushVinEfield: ["species1"],
+            propagators_coupling.EfieldWeights: ["e_field", "species1"],
+            propagators_markers.PushVxB: ["species1"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["em_fields"],
+            option=propagators_fields.ImplicitDiffusion,
+            dct=dct,
+        )
+        cls.add_option(
+            species=["kinetic", "species1"],
+            key="override_eq_params",
+            option=[False, {"epsilon": -1.0, "alpha": 1.0}],
+            dct=dct,
+        )
+        return dct

-    def update_scalar_quantities(self):
-        # e*M1*e/2
-        e = self.em_fields.e_field.spline.vector
-        particles = self.kinetic_ions.var.particles
+    def __init__(self, params, comm, clone_config=None, baseclass=False):
+        """Initializes the model either as the full model or as a baseclass to inherit from.
+        In case of being a baseclass, the propagators will not be initialized in the __init__, which allows other propagators to be added.

-        en_E = 0.5 * self.mass_ops.M1.dot_inner(e, e)
-        self.update_scalar("en_E", en_E)
+        Parameters
+        ----------
+        baseclass : bool, optional
+            Whether this model is used as a baseclass. Default is False.
+        """

-        # evaluate f0
-        if not hasattr(self, "_f0"):
-            backgrounds = self.kinetic_ions.var.backgrounds
-            if isinstance(backgrounds, list):
-                self._f0 = backgrounds[0]
-            else:
-                self._f0 = backgrounds
-            self._f0_values = xp.zeros(
-                self.kinetic_ions.var.particles.markers.shape[0],
-                dtype=float,
-            )
-            assert isinstance(self._f0, Maxwellian3D)
+        # initialize base class
+        super().__init__(params, comm=comm, clone_config=clone_config)

-        self._f0_values[particles.valid_mks] = self._f0(*particles.phasespace_coords.T)
+        from struphy.kinetic_background import maxwellians

-        # alpha^2 * v_th^2 / (2*N) * sum_p s_0 * w_p^2 / f_{0,p}
-        alpha = self.kinetic_ions.equation_params.alpha
-        vth = self._f0.maxw_params["vth1"][0]
+        # if model is used as a baseclass
+        self._baseclass = baseclass

-        self._tmp[0] = (
-            alpha**2
-            * vth**2
-            / (2 * particles.Np)
-            * xp.dot(
-                particles.weights**2,  # w_p^2
-                particles.sampling_density / self._f0_values[particles.valid_mks],  # s_{0,p} / f_{0,p}
-            )
-        )
+        # kinetic parameters
+        self._species_params = params["kinetic"]["species1"]
+
+        # Assert Maxwellian background (if list, the first entry is taken)
+        bckgr_params = self._species_params["background"]
+        li_bp = list(bckgr_params)
+        assert li_bp[0] == "Maxwellian3D", "The background distribution function must be a uniform Maxwellian!"
+        if len(li_bp) > 1:
+            # overwrite f0 with single Maxwellian
+            self._f0 = getattr(maxwellians, li_bp[0][:-2])(
+                maxw_params=bckgr_params[li_bp[0]],
+            )
+        else:
+            # keep allocated background
+            self._f0 = self.pointer["species1"].f0

-        self.update_scalar("en_w", self._tmp[0])
-        self.update_scalar("en_tot", self._tmp[0] + en_E)
+        # Assert uniformity of the Maxwellian background
+        assert self._f0.maxw_params["u1"] == 0.0, "The background Maxwellian cannot have shifts in velocity space!"
+        assert self._f0.maxw_params["u2"] == 0.0, "The background Maxwellian cannot have shifts in velocity space!"
+        assert self._f0.maxw_params["u3"] == 0.0, "The background Maxwellian cannot have shifts in velocity space!"
+        assert self._f0.maxw_params["vth1"] == self._f0.maxw_params["vth2"] == self._f0.maxw_params["vth3"], (
+            "The background Maxwellian must be isotropic in velocity space!"
+        )
+        self.vth = self._f0.maxw_params["vth1"]
+
+        # Get coupling strength
+        if self._species_params["options"]["override_eq_params"]:
+            self.epsilon = self._species_params["options"]["override_eq_params"]["epsilon"]
+            self.alpha = self._species_params["options"]["override_eq_params"]["alpha"]
+            if self.rank_world == 0:
+                print(
+                    f"\n!!! Override equation parameters: {self.epsilon = }, {self.alpha = }.\n",
+                )
+        else:
+            self.epsilon = self.equation_params["species1"]["epsilon"]
+            self.alpha = self.equation_params["species1"]["alpha"]
+
+        # allocate memory for evaluating f0 in energy computation
+        self._f0_values = np.zeros(
+            self.pointer["species1"].markers.shape[0],
+            dtype=float,
+        )
+
+        # ====================================================================================
+        # Create pointers to background electric potential and field
+        self._has_background_e = False
+        if "external_E0" in self.params["em_fields"]["options"].keys():
+            e0 = self.params["em_fields"]["options"]["external_E0"]
+            if e0 != 0.0:
+                self._has_background_e = True
+                self._e_background = self.derham.Vh["1"].zeros()
+                for block in self._e_background._blocks:
+                    block._data[:, :, :] += e0
+
+        # Get parameters of the background magnetic field
+        if self.projected_equil:
+            self._b_background = self.projected_equil.b2
+        else:
+            self._b_background = None
+        # ====================================================================================
+
+        # propagator parameters
+        self._poisson_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"]
+        algo_eta = params["kinetic"]["species1"]["options"]["PushEta"]["algo"]
+        params_coupling = params["em_fields"]["options"]["EfieldWeights"]["solver"]
+
+        # Initialize propagators/integrators used in splitting substeps
+        self._kwargs[propagators_markers.PushEta] = {
+            "algo": algo_eta,
+        }
+
+        # Only add PushVinEfield if e-field is non-zero, otherwise it is more expensive
+        if self._has_background_e:
+            self._kwargs[propagators_markers.PushVinEfield] = {
+                "e_field": self._e_background,
+                "kappa": 1.0 / self.epsilon,
+            }
+        else:
+            self._kwargs[propagators_markers.PushVinEfield] = None
+
+        self._kwargs[propagators_coupling.EfieldWeights] = {
+            "alpha": self.alpha,
+            "kappa": 1.0 / self.epsilon,
+            "f0": self._f0,
+            "solver": params_coupling,
+        }
+
+        # Only add PushVxB if magnetic field is not zero
+        self._kwargs[propagators_markers.PushVxB] = None
+        if self._b_background:
+            self._kwargs[propagators_markers.PushVxB] = {
+                "kappa": 1.0 / self.epsilon,
+                "b2": self._b_background,
+            }
+
+        # Initialize propagators used in splitting substeps
+        if not self._baseclass:
+            self.init_propagators()
+
+        # Scalar variables to be saved during the simulation
+        self.add_scalar("en_E")
+        self.add_scalar("en_w", compute="from_particles", species="species1")
+        self.add_scalar("en_tot")
+
+        # temporaries
+        self._tmp = np.empty(1, dtype=float)
+        self.en_E = 0.0

-    def allocate_propagators(self):
+    def initialize_from_params(self):
        """Solve initial Poisson equation.

        :meta private:
        """
+        from struphy.pic.accumulation.particles_to_grid import AccumulatorVector

-        # initialize fields and particles
-        super().allocate_propagators()
-
-        if MPI.COMM_WORLD.Get_rank() == 0:
-            print("\nINITIAL POISSON SOLVE:")
-
-        # use control variate method
-        particles = self.kinetic_ions.var.particles
-        particles.update_weights()
-
-        # sanity check
-        # self.pointer['species1'].show_distribution_function(
-        #     [True] + [False]*5, [xp.linspace(0, 1, 32)])
+        # Initialize fields and particles
+        super().initialize_from_params()

-        # accumulate charge density
+        # Accumulate charge density
        charge_accum = AccumulatorVector(
-            particles,
+            self.pointer["species1"],
            "H1",
            Pyccelkernel(accum_kernels.charge_density_0form),
            self.mass_ops,
            self.domain.args_domain,
        )

-        # another sanity check: compute FE coeffs of density
-        # charge_accum.show_accumulated_spline_field(self.mass_ops)
-
-        alpha = self.kinetic_ions.equation_params.alpha
-        epsilon = self.kinetic_ions.equation_params.epsilon
-
-        self.initial_poisson.options.rho = charge_accum
-        self.initial_poisson.options.rho_coeffs = alpha**2 / epsilon
-        self.initial_poisson.allocate()
+        charge_accum(self.pointer["species1"].vdim)
+
+        # Instantiate Poisson solver
+        _phi = self.derham.Vh["0"].zeros()
+        poisson_solver = propagators_fields.ImplicitDiffusion(
+            _phi,
+            sigma_1=0.0,
+            sigma_2=0.0,
+            sigma_3=1.0,
+            rho=self.alpha**2 / self.epsilon * charge_accum.vectors[0],
+            solver=self._poisson_params,
+        )

        # Solve with dt=1. and compute electric field
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        if self.rank_world == 0:
            print("\nSolving initial Poisson problem...")
-        self.initial_poisson(1.0)
-
-        phi = self.initial_poisson.variables.phi.spline.vector
-        self.derham.grad.dot(-phi, out=self.em_fields.e_field.spline.vector)
-        if MPI.COMM_WORLD.Get_rank() == 0:
+        poisson_solver(1.0)
+        self.derham.grad.dot(-_phi, out=self.pointer["e_field"])
+        if self.rank_world == 0:
            print("Done.")

-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "maxwellian_1 + maxwellian_2" in line:
-                    new_file += ["background = maxwellian_1\n"]
-                elif "maxwellian_1pt =" in line:
-                    new_file += ["maxwellian_1pt = maxwellians.Maxwellian3D(n=(0.0, perturbation))\n"]
-                elif "set_save_data" in line:
-                    new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"]
-                    new_file += ["model.kinetic_ions.set_save_data(binning_plots=(binplot,))\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
+    def update_scalar_quantities(self):
+        # 0.5 * e^T * M_1 * e
+        self.en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"])
+        self.update_scalar("en_E", self.en_E)
+
+        # evaluate f0
+        self._f0_values[self.pointer["species1"].valid_mks] = self._f0(*self.pointer["species1"].phasespace_coords.T)
+
+        # alpha^2 * v_th^2 / (2*N) * sum_p s_0 * w_p^2 / f_{0,p}
+        self._tmp[0] = (
+            self.alpha**2
+            * self.vth**2
+            / (2 * self.pointer["species1"].Np)
+            * np.dot(
+                self.pointer["species1"].weights ** 2,  # w_p^2
+                self.pointer["species1"].sampling_density
+                / self._f0_values[self.pointer["species1"].valid_mks],  # s_{0,p} / f_{0,p}
+            )
+        )
+
+        self.update_scalar("en_w", self._tmp[0])
+
+        # en_tot = en_w + en_e
+        if not self._baseclass:
+            self.update_scalar("en_tot", self._tmp[0] + self.en_E)
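The delta-f energy above is `alpha**2 * vth**2 / (2N) * sum_p s0_p * w_p**2 / f0_p`. A sketch with markers drawn from a uniform isotropic Maxwellian, so that the sampling density equals the background (`s0 = f0`, the case the assertions in `__init__` enforce); all values are illustrative:

```python
import numpy as np

rng = np.random.default_rng(2)
N, vth, alpha = 400, 1.0, 1.0
v = rng.normal(0.0, vth, (N, 3))                 # velocities sampled from f0

# 3D Maxwellian background evaluated at the marker velocities
f0 = np.exp(-(v**2).sum(1) / (2 * vth**2)) / (2 * np.pi * vth**2) ** 1.5
s0 = f0.copy()                                   # markers drawn from f0 -> s0 = f0
w = rng.normal(0.0, 0.01, N)                     # small delta-f weights

en_w = alpha**2 * vth**2 / (2 * N) * np.dot(w**2, s0 / f0)
print(en_w)
```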
class LinearVlasovMaxwellOneSpecies(LinearVlasovAmpereOneSpecies):
@@ -811,82 +921,80 @@ class LinearVlasovMaxwellOneSpecies(LinearVlasovAmpereOneSpecies):
    :ref:`Model info `:
    """

-    ## species
-
-    class EMFields(FieldSpecies):
-        def __init__(self):
-            self.e_field = FEECVariable(space="Hcurl")
-            self.b_field = FEECVariable(space="Hdiv")
-            self.phi = FEECVariable(space="H1")
-            self.init_variables()
-
-    class KineticIons(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="DeltaFParticles6D")
-            self.init_variables()
-
-    ## propagators
-
-    class Propagators:
-        def __init__(
-            self,
-            with_B0: bool = True,
-            with_E0: bool = True,
-        ):
-            self.push_eta = propagators_markers.PushEta()
-            if with_E0:
-                self.push_vinE = propagators_markers.PushVinEfield()
-            self.coupling_Eweights = propagators_coupling.EfieldWeights()
-            if with_B0:
-                self.push_vxb = propagators_markers.PushVxB()
-            self.maxwell = propagators_fields.Maxwell()
-
-    ## abstract methods
-
-    def __init__(
-        self,
-        with_B0: bool = True,
-        with_E0: bool = True,
-    ):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
-
-        # 1. instantiate all species
-        self.em_fields = self.EMFields()
-        self.kinetic_ions = self.KineticIons()
-
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators(with_B0=with_B0, with_E0=with_E0)
-
-        # 3. assign variables to propagators
-        self.propagators.push_eta.variables.var = self.kinetic_ions.var
-        if with_E0:
-            self.propagators.push_vinE.variables.var = self.kinetic_ions.var
-        self.propagators.coupling_Eweights.variables.e = self.em_fields.e_field
-        self.propagators.coupling_Eweights.variables.ions = self.kinetic_ions.var
-        if with_B0:
-            self.propagators.push_vxb.variables.ions = self.kinetic_ions.var
-        self.propagators.maxwell.variables.e = self.em_fields.e_field
-        self.propagators.maxwell.variables.b = self.em_fields.b_field
-
-        # define scalars for update_scalar_quantities
-        self.add_scalar("en_E")
-        self.add_scalar("en_B")
-        self.add_scalar("en_w", compute="from_particles", variable=self.kinetic_ions.var)
-        self.add_scalar("en_tot")
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
+
+        dct["em_fields"]["e_field"] = "Hcurl"
+        dct["em_fields"]["b_field"] = "Hdiv"
+        dct["kinetic"]["species1"] = "DeltaFParticles6D"
+        return dct
+
+    @staticmethod
+    def bulk_species():
+        return "species1"
+
+    @staticmethod
+    def velocity_scale():
+        return "light"
+
+    @staticmethod
+    def propagators_dct():
+        return {
+            propagators_markers.PushEta: ["species1"],
+            propagators_markers.PushVinEfield: ["species1"],
+            propagators_coupling.EfieldWeights: ["e_field", "species1"],
+            propagators_markers.PushVxB: ["species1"],
+            propagators_fields.Maxwell: ["e_field", "b_field"],
+        }
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["em_fields"],
+            option=propagators_fields.ImplicitDiffusion,
+            dct=dct,
+        )
+        cls.add_option(
+            species=["kinetic", "species1"],
+            key="override_eq_params",
+            option=[False, {"epsilon": -1.0, "alpha": 1.0}],
+            dct=dct,
+        )
+        return dct
+
+    def __init__(self, params, comm, clone_config=None):
+        super().__init__(params=params, comm=comm, clone_config=clone_config, baseclass=True)
+
+        # propagator parameters
+        params_maxwell = params["em_fields"]["options"]["Maxwell"]["solver"]
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_fields.Maxwell] = {"solver": params_maxwell}
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()

-        # initial Poisson (not a propagator used in time stepping)
-        self.initial_poisson = propagators_fields.Poisson()
-        self.initial_poisson.variables.phi = self.em_fields.phi
+        # magnetic energy
+        self.add_scalar("en_b")
+
+    def initialize_from_params(self):
+        super().initialize_from_params()

    def update_scalar_quantities(self):
        super().update_scalar_quantities()

        # 0.5 * b^T * M_2 * b
-        b = self.em_fields.b_field.spline.vector
-
-        en_B = 0.5 * self._mass_ops.M2.dot_inner(b, b)
-        self.update_scalar("en_tot", self.scalar_quantities["en_tot"]["value"][0] + en_B)
+        en_B = 0.5 * self._mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"])
+        self.update_scalar("en_b", en_B)
+        self.update_scalar("en_tot", self._tmp[0] + self.en_E + en_B)
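The `baseclass=True` mechanism lets `LinearVlasovMaxwellOneSpecies` reuse the parent's entire setup and append `Maxwell` before the propagators are initialized. A stripped-down sketch of that pattern with hypothetical class names (not the struphy base classes):

```python
# Minimal sketch of the deferred-initialization pattern, assuming a parent
# that collects propagator kwargs first and only builds them on demand.
class Parent:
    def __init__(self, baseclass=False):
        self._kwargs = {"push": {"algo": "rk4"}}
        if not baseclass:
            self.init_propagators()        # full model: build immediately

    def init_propagators(self):
        self.propagators = list(self._kwargs)  # stand-in for real construction

class Child(Parent):
    def __init__(self):
        super().__init__(baseclass=True)         # defer initialization
        self._kwargs["maxwell"] = {"solver": "pcg"}  # append one more substep
        self.init_propagators()                  # now build everything

print(Child().propagators)  # ['push', 'maxwell']
```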
assign variables to propagators - self.propagators.gc_poisson.variables.phi = self.em_fields.phi - self.propagators.push_gc_bxe.variables.ions = self.kinetic_ions.var - self.propagators.push_gc_para.variables.ions = self.kinetic_ions.var - - # define scalars for update_scalar_quantities - self.add_scalar("en_phi") - self.add_scalar("en_particles", compute="from_particles", variable=self.kinetic_ions.var) - self.add_scalar("en_tot") - - @property - def bulk_species(self): - return self.kinetic_ions - - @property - def velocity_scale(self): + @staticmethod + def velocity_scale(): return "thermal" - def allocate_helpers(self): - self._tmp3 = xp.empty(1, dtype=float) - self._e_field = self.derham.Vh["1"].zeros() + @staticmethod + def propagators_dct(): + return { + propagators_fields.ImplicitDiffusion: ["phi"], + propagators_markers.PushGuidingCenterBxEstar: ["ions"], + propagators_markers.PushGuidingCenterParallel: ["ions"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + # add special options + @classmethod + def options(cls): + dct = super().options() + cls.add_option( + species=["kinetic", "ions"], + key="override_eq_params", + option=[False, {"epsilon": 1.0}], + dct=dct, + ) + return dct - assert self.kinetic_ions.charge_number > 0, "Model written only for positive ions." + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - def allocate_propagators(self): - """Solve initial Poisson equation. + from struphy.feec.projectors import L2Projector + from struphy.pic.accumulation.particles_to_grid import AccumulatorVector - :meta private: - """ + # prelim + solver_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"] + ions_params = params["kinetic"]["ions"] - # initialize fields and particles - super().allocate_propagators() + Z = ions_params["phys_params"]["Z"] + assert Z > 0 # must be positive ions # Poisson right-hand side - particles = self.kinetic_ions.var.particles - Z = self.kinetic_ions.charge_number - epsilon = self.kinetic_ions.equation_params.epsilon - charge_accum = AccumulatorVector( - particles, + self.pointer["ions"], "H1", Pyccelkernel(accum_kernels_gc.gc_density_0form), self.mass_ops, self.domain.args_domain, ) - rho = charge_accum + rho = (charge_accum, self.pointer["ions"]) # get neutralizing background density - if not particles.control_variate: + if not self.pointer["ions"].control_variate: l2_proj = L2Projector("H1", self.mass_ops) - f0e = Z * particles.f0 + f0e = Z * self.pointer["ions"].f0 assert isinstance(f0e, KineticBackground) - rho_eh = FEECVariable(space="H1") - rho_eh.allocate(derham=self.derham, domain=self.domain) - rho_eh.spline.vector = l2_proj.get_dofs(f0e.n) + rho_eh = l2_proj.get_dofs(f0e.n) rho = [rho] rho += [rho_eh] - self.propagators.gc_poisson.options.sigma_1 = 1.0 / epsilon**2 / Z - self.propagators.gc_poisson.options.sigma_2 = 0.0 - self.propagators.gc_poisson.options.sigma_3 = 1.0 / epsilon - self.propagators.gc_poisson.options.stab_mat = "M0ad" - self.propagators.gc_poisson.options.diffusion_mat = "M1perp" - self.propagators.gc_poisson.options.rho = rho - self.propagators.gc_poisson.allocate() + # Get coupling strength + if ions_params["options"]["override_eq_params"]: + self.epsilon = 
ions_params["options"]["override_eq_params"]["epsilon"] + print( + f"\n!!! Override equation parameters: {self.epsilon = }.", + ) + else: + self.epsilon = self.equation_params["ions"]["epsilon"] + + # set keyword arguments for propagators + self._kwargs[propagators_fields.ImplicitDiffusion] = { + "sigma_1": 1.0 / self.epsilon**2 / Z, # set to zero for Landau damping test + "sigma_2": 0.0, + "sigma_3": 1.0 / self.epsilon, + "stab_mat": "M0ad", + "diffusion_mat": "M1gyro", + "rho": rho, + "solver": solver_params, + } + + self._kwargs[propagators_markers.PushGuidingCenterBxEstar] = { + "phi": self.pointer["phi"], + "evaluate_e_field": True, + "epsilon": self.epsilon / Z, + "algo": ions_params["options"]["PushGuidingCenterBxEstar"]["algo"], + } + + self._kwargs[propagators_markers.PushGuidingCenterParallel] = { + "phi": self.pointer["phi"], + "evaluate_e_field": True, + "epsilon": self.epsilon / Z, + "algo": ions_params["options"]["PushGuidingCenterParallel"]["algo"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() + + # scalar quantities + self.add_scalar("en_phi") + self.add_scalar("en_particles", compute="from_particles", species="ions") + self.add_scalar("en_tot") - def update_scalar_quantities(self): - phi = self.em_fields.phi.spline.vector - particles = self.kinetic_ions.var.particles - epsilon = self.kinetic_ions.equation_params.epsilon + # MPI operations needed for scalar variables + self._tmp3 = np.empty(1, dtype=float) + self._e_field = self.derham.Vh["1"].zeros() + def update_scalar_quantities(self): # energy from polarization - e1 = self.derham.grad.dot(-phi, out=self._e_field) + e1 = self.derham.grad.dot(-self.pointer["phi"], out=self._e_field) en_phi1 = 0.5 * self.mass_ops.M1gyro.dot_inner(e1, e1) # energy from adiabatic electrons - en_phi = 0.5 / epsilon**2 * self.mass_ops.M0ad.dot_inner(phi, phi) + en_phi = 0.5 / self.epsilon**2 * self.mass_ops.M0ad.dot_inner(self.pointer["phi"], self.pointer["phi"]) # for Landau damping test # en_phi = 0. 
# mu_p * |B0(eta_p)| - particles.save_magnetic_background_energy() + self.pointer["ions"].save_magnetic_background_energy() # 1/N sum_p (w_p v_p^2/2 + mu_p |B0|_p) self._tmp3[0] = ( 1 - / particles.Np - * xp.sum( - particles.weights * particles.velocities[:, 0] ** 2 / 2.0 + particles.markers_wo_holes_and_ghost[:, 8], + / self.pointer["ions"].Np + * np.sum( + self.pointer["ions"].weights * self.pointer["ions"].velocities[:, 0] ** 2 / 2.0 + + self.pointer["ions"].markers_wo_holes_and_ghost[:, 8], ) ) self.update_scalar("en_phi", en_phi + en_phi1) self.update_scalar("en_particles", self._tmp3[0]) self.update_scalar("en_tot", en_phi + en_phi1 + self._tmp3[0]) - - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "BaseUnits(" in line: - new_file += ["base_units = BaseUnits(kBT=1.0)\n"] - elif "push_gc_bxe.Options" in line: - new_file += [ - "model.propagators.push_gc_bxe.options = model.propagators.push_gc_bxe.Options(phi=model.em_fields.phi)\n", - ] - elif "push_gc_para.Options" in line: - new_file += [ - "model.propagators.push_gc_para.options = model.propagators.push_gc_para.Options(phi=model.em_fields.phi)\n", - ] - elif "set_save_data" in line: - new_file += ["\nbinplot = BinningPlot(slice='e1', n_bins=128, ranges=(0.0, 1.0))\n"] - new_file += ["model.kinetic_ions.set_save_data(binning_plots=(binplot,))\n"] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) diff --git a/src/struphy/models/tests/test_fluid_models.py b/src/struphy/models/tests/test_fluid_models.py new file mode 100644 index 000000000..44473e163 --- /dev/null +++ b/src/struphy/models/tests/test_fluid_models.py @@ -0,0 +1,28 @@ +import inspect + +import pytest + +from struphy.models.tests.util import wrapper_for_testing + + +@pytest.mark.parametrize( + "map_and_equil", [("Cuboid", "HomogenSlab"), ("HollowTorus", "AdhocTorus"), ("Tokamak", "EQDSKequilibrium")] +) +def test_fluid( + map_and_equil: tuple | list, + fast: bool, + vrbose: bool, + verification: bool, + nclones: int, + show_plots: bool, +): + """Tests all models in models/fluid.py.""" + wrapper_for_testing( + mtype="fluid", + map_and_equil=map_and_equil, + fast=fast, + vrbose=vrbose, + verification=verification, + nclones=nclones, + show_plots=show_plots, + ) diff --git a/src/struphy/models/tests/test_hybrid_models.py b/src/struphy/models/tests/test_hybrid_models.py new file mode 100644 index 000000000..fb056a86e --- /dev/null +++ b/src/struphy/models/tests/test_hybrid_models.py @@ -0,0 +1,28 @@ +import inspect + +import pytest + +from struphy.models.tests.util import wrapper_for_testing + + +@pytest.mark.parametrize( + "map_and_equil", [("Cuboid", "HomogenSlab"), ("HollowTorus", "AdhocTorus"), ("Tokamak", "EQDSKequilibrium")] +) +def test_hybrid( + map_and_equil: tuple | list, + fast: bool, + vrbose: bool, + verification: bool, + nclones: int, + show_plots: bool, +): + """Tests all models in models/hybrid.py.""" + wrapper_for_testing( + mtype="hybrid", + map_and_equil=map_and_equil, + fast=fast, + vrbose=vrbose, + verification=verification, + nclones=nclones, + show_plots=show_plots, + ) diff --git a/src/struphy/models/tests/test_kinetic_models.py b/src/struphy/models/tests/test_kinetic_models.py new file mode 100644 index 000000000..33180b74a --- /dev/null +++ b/src/struphy/models/tests/test_kinetic_models.py @@ -0,0 
+1,28 @@ +import inspect + +import pytest + +from struphy.models.tests.util import wrapper_for_testing + + +@pytest.mark.parametrize( + "map_and_equil", [("Cuboid", "HomogenSlab"), ("HollowTorus", "AdhocTorus"), ("Tokamak", "EQDSKequilibrium")] +) +def test_kinetic( + map_and_equil: tuple | list, + fast: bool, + vrbose: bool, + verification: bool, + nclones: int, + show_plots: bool, +): + """Tests models in models/kinetic.py.""" + wrapper_for_testing( + mtype="kinetic", + map_and_equil=map_and_equil, + fast=fast, + vrbose=vrbose, + verification=verification, + nclones=nclones, + show_plots=show_plots, + ) diff --git a/src/struphy/models/tests/test_toy_models.py b/src/struphy/models/tests/test_toy_models.py new file mode 100644 index 000000000..8b1f03456 --- /dev/null +++ b/src/struphy/models/tests/test_toy_models.py @@ -0,0 +1,28 @@ +import inspect + +import pytest + +from struphy.models.tests.util import wrapper_for_testing + + +@pytest.mark.parametrize( + "map_and_equil", [("Cuboid", "HomogenSlab"), ("HollowTorus", "AdhocTorus"), ("Tokamak", "EQDSKequilibrium")] +) +def test_toy( + map_and_equil: tuple | list, + fast: bool, + vrbose: bool, + verification: bool, + nclones: int, + show_plots: bool, +): + """Tests models in models/toy.py.""" + wrapper_for_testing( + mtype="toy", + map_and_equil=map_and_equil, + fast=fast, + vrbose=vrbose, + verification=verification, + nclones=nclones, + show_plots=show_plots, + ) diff --git a/src/struphy/models/tests/test_xxpproc.py b/src/struphy/models/tests/test_xxpproc.py index 3d4fef2f0..acb8a6590 100644 --- a/src/struphy/models/tests/test_xxpproc.py +++ b/src/struphy/models/tests/test_xxpproc.py @@ -49,7 +49,7 @@ def test_pproc_codes(model: str = None, group: str = None): elif group == "toy": list_models = list_toy else: - raise ValueError(f"{group =} is not a valid group specification.") + raise ValueError(f"{group = } is not a valid group specification.") if comm.Get_rank() == 0: if model is None: diff --git a/src/struphy/models/tests/verification.py b/src/struphy/models/tests/verification.py index d28fc3d6e..fefcc0e3c 100644 --- a/src/struphy/models/tests/verification.py +++ b/src/struphy/models/tests/verification.py @@ -2,7 +2,6 @@ import pickle from pathlib import Path -import cunumpy as xp import h5py import yaml from matplotlib import pyplot as plt @@ -12,6 +11,7 @@ import struphy from struphy.post_processing import pproc_struphy +from struphy.utils.arrays import xp as np def VlasovAmpereOneSpecies_weakLandau( @@ -40,7 +40,7 @@ def E_exact(t): r = 0.3677 omega = 1.4156 phi = 0.5362 - return 2 * eps**2 * xp.pi / k**2 * r**2 * xp.exp(2 * gamma * t) * xp.cos(omega * t - phi) ** 2 + return 2 * eps**2 * np.pi / k**2 * r**2 * np.exp(2 * gamma * t) * np.cos(omega * t - phi) ** 2 # get parameters with open(os.path.join(path_out, "parameters.yml")) as f: @@ -56,24 +56,24 @@ def E_exact(t): with h5py.File(os.path.join(pa_data, "data_proc0.hdf5"), "r") as f: time = f["time"]["value"][()] E = f["scalar"]["en_E"][()] - logE = xp.log10(E) + logE = np.log10(E) # find where time derivative of E is zero - dEdt = (xp.roll(logE, -1) - xp.roll(logE, 1))[1:-1] / (2.0 * dt) - zeros = dEdt * xp.roll(dEdt, -1) < 0.0 - maxima_inds = xp.logical_and(zeros, dEdt > 0.0) + dEdt = (np.roll(logE, -1) - np.roll(logE, 1))[1:-1] / (2.0 * dt) + zeros = dEdt * np.roll(dEdt, -1) < 0.0 + maxima_inds = np.logical_and(zeros, dEdt > 0.0) maxima = logE[1:-1][maxima_inds] t_maxima = time[1:-1][maxima_inds] # linear fit - linfit = xp.polyfit(t_maxima[:5], maxima[:5], 1) + linfit = 
np.polyfit(t_maxima[:5], maxima[:5], 1) gamma_num = linfit[0] # plot if show_plots and rank == 0: plt.figure(figsize=(18, 12)) plt.plot(time, logE, label="numerical") - plt.plot(time, xp.log10(E_exact(time)), label="exact") + plt.plot(time, np.log10(E_exact(time)), label="exact") plt.legend() plt.title(f"{dt=}, {algo=}, {Nel=}, {p=}, {ppc=}") plt.xlabel("time [m/c]") @@ -84,9 +84,9 @@ def E_exact(t): plt.show() # assert - rel_error = xp.abs(gamma_num - gamma) / xp.abs(gamma) - assert rel_error < 0.25, f"{rank =}: Assertion for weak Landau damping failed: {gamma_num =} vs. {gamma =}." - print(f"{rank =}: Assertion for weak Landau damping passed ({rel_error =}).") + rel_error = np.abs(gamma_num - gamma) / np.abs(gamma) + assert rel_error < 0.25, f"{rank = }: Assertion for weak Landau damping failed: {gamma_num = } vs. {gamma = }." + print(f"{rank = }: Assertion for weak Landau damping passed ({rel_error = }).") def LinearVlasovAmpereOneSpecies_weakLandau( @@ -115,7 +115,7 @@ def E_exact(t): r = 0.3677 omega = 1.4156 phi = 0.5362 - return 2 * eps**2 * xp.pi / k**2 * r**2 * xp.exp(2 * gamma * t) * xp.cos(omega * t - phi) ** 2 + return 2 * eps**2 * np.pi / k**2 * r**2 * np.exp(2 * gamma * t) * np.cos(omega * t - phi) ** 2 # get parameters with open(os.path.join(path_out, "parameters.yml")) as f: @@ -131,24 +131,24 @@ def E_exact(t): with h5py.File(os.path.join(pa_data, "data_proc0.hdf5"), "r") as f: time = f["time"]["value"][()] E = f["scalar"]["en_E"][()] - logE = xp.log10(E) + logE = np.log10(E) # find where time derivative of E is zero - dEdt = (xp.roll(logE, -1) - xp.roll(logE, 1))[1:-1] / (2.0 * dt) - zeros = dEdt * xp.roll(dEdt, -1) < 0.0 - maxima_inds = xp.logical_and(zeros, dEdt > 0.0) + dEdt = (np.roll(logE, -1) - np.roll(logE, 1))[1:-1] / (2.0 * dt) + zeros = dEdt * np.roll(dEdt, -1) < 0.0 + maxima_inds = np.logical_and(zeros, dEdt > 0.0) maxima = logE[1:-1][maxima_inds] t_maxima = time[1:-1][maxima_inds] # linear fit - linfit = xp.polyfit(t_maxima[:5], maxima[:5], 1) + linfit = np.polyfit(t_maxima[:5], maxima[:5], 1) gamma_num = linfit[0] # plot if show_plots and rank == 0: plt.figure(figsize=(18, 12)) plt.plot(time, logE, label="numerical") - plt.plot(time, xp.log10(E_exact(time)), label="exact") + plt.plot(time, np.log10(E_exact(time)), label="exact") plt.legend() plt.title(f"{dt=}, {algo=}, {Nel=}, {p=}, {ppc=}") plt.xlabel("time [m/c]") @@ -160,9 +160,9 @@ def E_exact(t): # plt.show() # assert - rel_error = xp.abs(gamma_num - gamma) / xp.abs(gamma) - assert rel_error < 0.25, f"{rank =}: Assertion for weak Landau damping failed: {gamma_num =} vs. {gamma =}." - print(f"{rank =}: Assertion for weak Landau damping passed ({rel_error =}).") + rel_error = np.abs(gamma_num - gamma) / np.abs(gamma) + assert rel_error < 0.25, f"{rank = }: Assertion for weak Landau damping failed: {gamma_num = } vs. {gamma = }." 
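# (The damping-rate extraction used above, restated as a self-contained sketch
# on synthetic data: locate the local maxima of log10(E) via a sign change of
# its central-difference derivative, then fit a line through the first five.
# gamma = -0.15 is a hypothetical stand-in; omega and phi are the constants
# from E_exact above:
#
#     import numpy as np
#
#     gamma, omega, phi = -0.15, 1.4156, 0.5362
#     time = np.linspace(0.0, 20.0, 401)
#     dt = time[1] - time[0]
#     logE = np.log10(np.exp(2 * gamma * time) * np.cos(omega * time - phi) ** 2)
#     dEdt = (np.roll(logE, -1) - np.roll(logE, 1))[1:-1] / (2.0 * dt)
#     maxima = np.logical_and(dEdt * np.roll(dEdt, -1) < 0.0, dEdt > 0.0)
#     gamma_num = np.polyfit(time[1:-1][maxima][:5], logE[1:-1][maxima][:5], 1)[0]
#
# gamma_num is the fitted slope that the assertion above compares to gamma.)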
+ print(f"{rank = }: Assertion for weak Landau damping passed ({rel_error = }).") def IsothermalEulerSPH_soundwave( @@ -190,8 +190,8 @@ def IsothermalEulerSPH_soundwave( MPI.COMM_WORLD.Barrier() path_n_sph = os.path.join(path_pp, "kinetic_data/euler_fluid/n_sph/view_0/") - ee1, ee2, ee3 = xp.load(os.path.join(path_n_sph, "grid_n_sph.npy")) - n_sph = xp.load(os.path.join(path_n_sph, "n_sph.npy")) + ee1, ee2, ee3 = np.load(os.path.join(path_n_sph, "grid_n_sph.npy")) + n_sph = np.load(os.path.join(path_n_sph, "n_sph.npy")) # print(f'{ee1.shape = }, {n_sph.shape = }') if show_plots and rank == 0: @@ -207,7 +207,7 @@ def IsothermalEulerSPH_soundwave( plot_ct = 0 for i in range(0, Nt + 1): if i % interval == 0: - print(f"{i =}") + print(f"{i = }") plot_ct += 1 ax = plt.gca() @@ -218,26 +218,27 @@ def IsothermalEulerSPH_soundwave( plt.plot(x.squeeze(), n_sph[i, :, 0, 0], style, label=f"time={i * dt:4.2f}") plt.xlim(0, 2.5) plt.legend() - ax.set_xticks(xp.linspace(0, 2.5, nx + 1)) + ax.set_xticks(np.linspace(0, 2.5, nx + 1)) ax.xaxis.set_major_formatter(FormatStrFormatter("%.2f")) plt.grid(c="k") plt.xlabel("x") plt.ylabel(r"$\rho$") - plt.title(f"standing sound wave ($c_s = 1$) for {nx =} and {ppb =}") + plt.title(f"standing sound wave ($c_s = 1$) for {nx = } and {ppb = }") if plot_ct == 11: break plt.show() # assert - error = xp.max(xp.abs(n_sph[0] - n_sph[-1])) - print(f"{rank =}: Assertion for SPH sound wave passed ({error =}).") + error = np.max(np.abs(n_sph[0] - n_sph[-1])) + print(f"{rank = }: Assertion for SPH sound wave passed ({error = }).") assert error < 1.3e-3 def Maxwell_coaxial( path_out: str, + rank: int, show_plots: bool = False, ): """Verification test for coaxial cable with Maxwell equations. Comparison w.r.t analytic solution. @@ -250,11 +251,12 @@ def Maxwell_coaxial( path_out : str Simulation output folder (absolute path). + rank : int + MPI rank. 
+ show_plots: bool Whether to show plots.""" - rank = MPI.COMM_WORLD.Get_rank() - if rank == 0: pproc_struphy.main(path_out, physical=True) MPI.COMM_WORLD.Barrier() def B_z(X, Y, Z, m, t): """Magnetic field in z direction of coaxial cable""" r = (X**2 + Y**2) ** 0.5 - theta = xp.arctan2(Y, X) - return (jv(m, r) - 0.28 * yn(m, r)) * xp.cos(m * theta - t) + theta = np.arctan2(Y, X) + return (jv(m, r) - 0.28 * yn(m, r)) * np.cos(m * theta - t) def E_r(X, Y, Z, m, t): """Electric field in radial direction of coaxial cable""" r = (X**2 + Y**2) ** 0.5 - theta = xp.arctan2(Y, X) - return -m / r * (jv(m, r) - 0.28 * yn(m, r)) * xp.cos(m * theta - t) + theta = np.arctan2(Y, X) + return -m / r * (jv(m, r) - 0.28 * yn(m, r)) * np.cos(m * theta - t) def E_theta(X, Y, Z, m, t): """Electric field in azimuthal direction of coaxial cable""" r = (X**2 + Y**2) ** 0.5 - theta = xp.arctan2(Y, X) - return ((m / r * jv(m, r) - jv(m + 1, r)) - 0.28 * (m / r * yn(m, r) - yn(m + 1, r))) * xp.sin(m * theta - t) + theta = np.arctan2(Y, X) + return ((m / r * jv(m, r) - jv(m + 1, r)) - 0.28 * (m / r * yn(m, r) - yn(m + 1, r))) * np.sin(m * theta - t) def to_E_r(X, Y, E_x, E_y): r = (X**2 + Y**2) ** 0.5 - theta = xp.arctan2(Y, X) - return xp.cos(theta) * E_x + xp.sin(theta) * E_y + theta = np.arctan2(Y, X) + return np.cos(theta) * E_x + np.sin(theta) * E_y def to_E_theta(X, Y, E_x, E_y): r = (X**2 + Y**2) ** 0.5 - theta = xp.arctan2(Y, X) - return -xp.sin(theta) * E_x + xp.cos(theta) * E_y + theta = np.arctan2(Y, X) + return -np.sin(theta) * E_x + np.cos(theta) * E_y # get parameters with open(os.path.join(path_out, "parameters.yml")) as f: @@ -297,7 +299,7 @@ def to_E_theta(X, Y, E_x, E_y): pproc_path = os.path.join(path_out, "post_processing/") em_fields_path = os.path.join(pproc_path, "fields_data/em_fields/") - t_grid = xp.load(os.path.join(pproc_path, "t_grid.npy")) + t_grid = np.load(os.path.join(pproc_path, "t_grid.npy")) grids_phy = pickle.loads(Path(os.path.join(pproc_path, "fields_data/grids_phy.bin")).read_bytes()) b_field_phy = pickle.loads(Path(os.path.join(em_fields_path, "b_field_phy.bin")).read_bytes()) e_field_phy = pickle.loads(Path(os.path.join(em_fields_path, "e_field_phy.bin")).read_bytes()) @@ -311,13 +313,7 @@ def to_E_theta(X, Y, E_x, E_y): vmax = E_theta(X, Y, grids_phy[0], modes, 0).max() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) plot_exac = ax1.contourf( - X, - Y, - E_theta(X, Y, grids_phy[0], modes, t_grid[-1]), - cmap="plasma", - levels=100, - vmin=vmin, - vmax=vmax, + X, Y, E_theta(X, Y, grids_phy[0], modes, t_grid[-1]), cmap="plasma", levels=100, vmin=vmin, vmax=vmax ) ax2.contourf( X, @@ -342,26 +338,26 @@ def to_E_theta(X, Y, E_x, E_y): Bz_tend = b_field_phy[t_grid[-1]][2][:, :, 0] Bz_exact = B_z(X, Y, grids_phy[0], modes, t_grid[-1]) - error_Er = xp.max(xp.abs((to_E_r(X, Y, Ex_tend, Ey_tend) - Er_exact))) - error_Etheta = xp.max(xp.abs((to_E_theta(X, Y, Ex_tend, Ey_tend) - Etheta_exact))) - error_Bz = xp.max(xp.abs((Bz_tend - Bz_exact))) + error_Er = np.max(np.abs((to_E_r(X, Y, Ex_tend, Ey_tend) - Er_exact))) + error_Etheta = np.max(np.abs((to_E_theta(X, Y, Ex_tend, Ey_tend) - Etheta_exact))) + error_Bz = np.max(np.abs((Bz_tend - Bz_exact))) - rel_err_Er = error_Er / xp.max(xp.abs(Er_exact)) - rel_err_Etheta = error_Etheta / xp.max(xp.abs(Etheta_exact)) - rel_err_Bz = error_Bz / xp.max(xp.abs(Bz_exact)) + rel_err_Er = error_Er / np.max(np.abs(Er_exact)) + rel_err_Etheta = error_Etheta / np.max(np.abs(Etheta_exact)) + rel_err_Bz
= error_Bz / np.max(np.abs(Bz_exact)) - print(f"{rel_err_Er =}") - print(f"{rel_err_Etheta =}") - print(f"{rel_err_Bz =}") + print(f"{rel_err_Er = }") + print(f"{rel_err_Etheta = }") + print(f"{rel_err_Bz = }") - assert rel_err_Bz < 0.0021, f"{rank =}: Assertion for magnetic field Maxwell failed: {rel_err_Bz =}" - print(f"{rank =}: Assertion for magnetic field Maxwell passed ({rel_err_Bz =}).") + assert rel_err_Bz < 0.0021, f"{rank = }: Assertion for magnetic field Maxwell failed: {rel_err_Bz = }" + print(f"{rank = }: Assertion for magnetic field Maxwell passed ({rel_err_Bz = }).") assert rel_err_Etheta < 0.0021, ( - f"{rank =}: Assertion for electric (E_theta) field Maxwell failed: {rel_err_Etheta =}" + f"{rank = }: Assertion for electric (E_theta) field Maxwell failed: {rel_err_Etheta = }" ) - print(f"{rank =}: Assertion for electric field Maxwell passed ({rel_err_Etheta =}).") - assert rel_err_Er < 0.0021, f"{rank =}: Assertion for electric (E_r) field Maxwell failed: {rel_err_Er =}" - print(f"{rank =}: Assertion for electric field Maxwell passed ({rel_err_Er =}).") + print(f"{rank = }: Assertion for electric field Maxwell passed ({rel_err_Etheta = }).") + assert rel_err_Er < 0.0021, f"{rank = }: Assertion for electric (E_r) field Maxwell failed: {rel_err_Er = }" + print(f"{rank = }: Assertion for electric field Maxwell passed ({rel_err_Er = }).") if __name__ == "__main__": diff --git a/src/struphy/models/toy.py b/src/struphy/models/toy.py index fd36b5d5f..e65212d6f 100644 --- a/src/struphy/models/toy.py +++ b/src/struphy/models/toy.py @@ -1,14 +1,6 @@ -import cunumpy as xp -from psydac.ddm.mpi import mpi as MPI - -from struphy.feec.projectors import L2Projector -from struphy.feec.variational_utilities import InternalEnergyEvaluator from struphy.models.base import StruphyModel -from struphy.models.species import FieldSpecies, FluidSpecies, ParticleSpecies -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable from struphy.propagators import propagators_coupling, propagators_fields, propagators_markers - -rank = MPI.COMM_WORLD.Get_rank() +from struphy.utils.arrays import xp as np class Maxwell(StruphyModel): @@ -31,63 +23,62 @@ class Maxwell(StruphyModel): :ref:`propagators` (called in sequence): 1. 
:class:`~struphy.propagators.propagators_fields.Maxwell` + + :ref:`Model info `: """ - ## species + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + + dct["em_fields"]["e_field"] = "Hcurl" + dct["em_fields"]["b_field"] = "Hdiv" + return dct - class EMFields(FieldSpecies): - def __init__(self): - self.e_field = FEECVariable(space="Hcurl") - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() + @staticmethod + def bulk_species(): + return None - ## propagators + @staticmethod + def velocity_scale(): + return "light" - class Propagators: - def __init__(self): - self.maxwell = propagators_fields.Maxwell() + @staticmethod + def propagators_dct(): + return {propagators_fields.Maxwell: ["e_field", "b_field"]} - ## abstract methods + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - # 1. instantiate all species - self.em_fields = self.EMFields() + # extract necessary parameters + algo = params["em_fields"]["options"]["Maxwell"]["algo"] + solver = params["em_fields"]["options"]["Maxwell"]["solver"] - # 2. instantiate all propagators - self.propagators = self.Propagators() + # set keyword arguments for propagators + self._kwargs[propagators_fields.Maxwell] = { + "algo": algo, + "solver": solver, + } - # 3. assign variables to propagators - self.propagators.maxwell.variables.e = self.em_fields.e_field - self.propagators.maxwell.variables.b = self.em_fields.b_field + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("electric energy") self.add_scalar("magnetic energy") self.add_scalar("total energy") - @property - def bulk_species(self): - return None - - @property - def velocity_scale(self): - return "light" - - def allocate_helpers(self): - pass - def update_scalar_quantities(self): - en_E = 0.5 * self.mass_ops.M1.dot_inner( - self.em_fields.e_field.spline.vector, - self.em_fields.e_field.spline.vector, - ) - en_B = 0.5 * self.mass_ops.M2.dot_inner( - self.em_fields.b_field.spline.vector, - self.em_fields.b_field.spline.vector, - ) + en_E = 0.5 * self.mass_ops.M1.dot_inner(self.pointer["e_field"], self.pointer["e_field"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b_field"], self.pointer["b_field"]) self.update_scalar("electric energy", en_E) self.update_scalar("magnetic energy", en_B) @@ -113,59 +104,80 @@ class Vlasov(StruphyModel): 1. :class:`~struphy.propagators.propagators_markers.PushVxB` 2. 
:class:`~struphy.propagators.propagators_markers.PushEta` + + :ref:`Model info `: """ - ## species + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class KineticIons(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles6D") - self.init_variables() + dct["kinetic"]["ions"] = "Particles6D" + return dct - ## propagators + @staticmethod + def bulk_species(): + return "ions" + + @staticmethod + def velocity_scale(): + return "cyclotron" - class Propagators: - def __init__(self): - self.push_vxb = propagators_markers.PushVxB() - self.push_eta = propagators_markers.PushEta() + @staticmethod + def propagators_dct(): + return { + propagators_markers.PushVxB: ["ions"], + propagators_markers.PushEta: ["ions"], + } - ## abstract methods + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}' ***") + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) - # 1. instantiate all species - self.kinetic_ions = self.KineticIons() + # prelim + ions_params = self.kinetic["ions"]["params"] - # 2. instantiate all propagators - self.propagators = self.Propagators() + # project magnetic background + self._b_eq = self.derham.P["2"]( + [ + self.equil.b2_1, + self.equil.b2_2, + self.equil.b2_3, + ] + ) - # 3. assign variables to propagators - self.propagators.push_vxb.variables.ions = self.kinetic_ions.var - self.propagators.push_eta.variables.var = self.kinetic_ions.var + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushVxB] = { + "algo": ions_params["options"]["PushVxB"]["algo"], + "kappa": 1.0, + "b2": self._b_eq, + "b2_add": None, + } - # define scalars for update_scalar_quantities - self.add_scalar("en_f", compute="from_particles", variable=self.kinetic_ions.var) + self._kwargs[propagators_markers.PushEta] = {"algo": ions_params["options"]["PushEta"]["algo"]} - @property - def bulk_species(self): - return self.kinetic_ions + # Initialize propagators used in splitting substeps + self.init_propagators() - @property - def velocity_scale(self): - return "cyclotron" + # Scalar variables to be saved during simulation + self.add_scalar("en_f", compute="from_particles", species="ions") - def allocate_helpers(self): - self._tmp = xp.empty(1, dtype=float) + # MPI operations needed for scalar variables + self._tmp = np.empty(1, dtype=float) def update_scalar_quantities(self): - particles = self.kinetic_ions.var.particles - self._tmp[0] = particles.markers_wo_holes[:, 6].dot( - particles.markers_wo_holes[:, 3] ** 2 - + particles.markers_wo_holes[:, 4] ** 2 - + particles.markers_wo_holes[:, 5] ** 2, - ) / (2 * particles.Np) + self._tmp[0] = self.pointer["ions"].markers_wo_holes[:, 6].dot( + self.pointer["ions"].markers_wo_holes[:, 3] ** 2 + + self.pointer["ions"].markers_wo_holes[:, 4] ** 2 + + self.pointer["ions"].markers_wo_holes[:, 5] ** 2, + ) / (2 * self.pointer["ions"].Np) self.update_scalar("en_f", self._tmp[0]) @@ -201,85 +213,98 @@ class GuidingCenter(StruphyModel): 1. :class:`~struphy.propagators.propagators_markers.PushGuidingCenterBxEstar` 2. 
:class:`~struphy.propagators.propagators_markers.PushGuidingCenterParallel` - """ - - ## species - class KineticIons(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles5D") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.push_bxe = propagators_markers.PushGuidingCenterBxEstar() - self.push_parallel = propagators_markers.PushGuidingCenterParallel() - - ## abstract methods - - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}' ***") + :ref:`Model info `: + """ - # 1. instantiate all species - self.kinetic_ions = self.KineticIons() + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - # 2. instantiate all propagators - self.propagators = self.Propagators() + dct["kinetic"]["ions"] = "Particles5D" + return dct - # 3. assign variables to propagators - self.propagators.push_bxe.variables.ions = self.kinetic_ions.var - self.propagators.push_parallel.variables.ions = self.kinetic_ions.var + @staticmethod + def bulk_species(): + return "ions" - # define scalars for update_scalar_quantities - self.add_scalar("en_fv", compute="from_particles", variable=self.kinetic_ions.var) - self.add_scalar("en_fB", compute="from_particles", variable=self.kinetic_ions.var) - self.add_scalar("en_tot", compute="from_particles", variable=self.kinetic_ions.var) + @staticmethod + def velocity_scale(): + return "alfvén" - @property - def bulk_species(self): - return self.kinetic_ions + @staticmethod + def propagators_dct(): + return { + propagators_markers.PushGuidingCenterBxEstar: ["ions"], + propagators_markers.PushGuidingCenterParallel: ["ions"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + # prelim + ions_params = self.kinetic["ions"]["params"] + epsilon = self.equation_params["ions"]["epsilon"] + + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushGuidingCenterBxEstar] = { + "epsilon": epsilon, + "algo": ions_params["options"]["PushGuidingCenterBxEstar"]["algo"], + } + + self._kwargs[propagators_markers.PushGuidingCenterParallel] = { + "epsilon": epsilon, + "algo": ions_params["options"]["PushGuidingCenterParallel"]["algo"], + } + + # Initialize propagators used in splitting substeps + self.init_propagators() - @property - def velocity_scale(self): - return "alfvén" + # Scalar variables to be saved during simulation + self.add_scalar("en_fv", compute="from_particles", species="ions") + self.add_scalar("en_fB", compute="from_particles", species="ions") + self.add_scalar("en_tot", compute="from_particles", species="ions") + self.add_scalar("n_lost_particles", compute="from_particles", species="ions") - def allocate_helpers(self): - self._en_fv = xp.empty(1, dtype=float) - self._en_fB = xp.empty(1, dtype=float) - self._en_tot = xp.empty(1, dtype=float) - self._n_lost_particles = xp.empty(1, dtype=float) + # MPI operations needed for scalar variables + self._en_fv = np.empty(1, dtype=float) + self._en_fB = np.empty(1, dtype=float) + self._en_tot = np.empty(1, dtype=float) + self._n_lost_particles = np.empty(1, dtype=float) def 
update_scalar_quantities(self): - particles = self.kinetic_ions.var.particles - # particles' kinetic energy - self._en_fv[0] = particles.markers[~particles.holes, 5].dot( - particles.markers[~particles.holes, 3] ** 2, - ) / (2.0 * particles.Np) - particles.save_magnetic_background_energy() + self._en_fv[0] = self.pointer["ions"].markers[~self.pointer["ions"].holes, 5].dot( + self.pointer["ions"].markers[~self.pointer["ions"].holes, 3] ** 2, + ) / (2.0 * self.pointer["ions"].Np) + + self.pointer["ions"].save_magnetic_background_energy() self._en_tot[0] = ( - particles.markers[~particles.holes, 5].dot( - particles.markers[~particles.holes, 8], + self.pointer["ions"] + .markers[~self.pointer["ions"].holes, 5] + .dot( + self.pointer["ions"].markers[~self.pointer["ions"].holes, 8], ) - / particles.Np + / self.pointer["ions"].Np ) self._en_fB[0] = self._en_tot[0] - self._en_fv[0] + self._n_lost_particles[0] = self.pointer["ions"].n_lost_markers + self.update_scalar("en_fv", self._en_fv[0]) self.update_scalar("en_fB", self._en_fB[0]) self.update_scalar("en_tot", self._en_tot[0]) - - self._n_lost_particles[0] = particles.n_lost_markers - self.derham.comm.Allreduce( - MPI.IN_PLACE, - self._n_lost_particles, - op=MPI.SUM, - ) + self.update_scalar("n_lost_particles", self._n_lost_particles[0]) class ShearAlfven(StruphyModel): @@ -308,59 +333,67 @@ class ShearAlfven(StruphyModel): :ref:`Model info `: """ - ## species - class EMFields(FieldSpecies): - def __init__(self): - self.b_field = FEECVariable(space="Hdiv") - self.init_variables() - - class MHD(FluidSpecies): - def __init__(self): - self.velocity = FEECVariable(space="Hdiv") - self.init_variables() + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class Propagators: - def __init__(self) -> None: - self.shear_alf = propagators_fields.ShearAlfven() + dct["em_fields"]["b2"] = "Hdiv" + dct["fluid"]["mhd"] = {"u2": "Hdiv"} + return dct - @property - def bulk_species(self): - return self.mhd + @staticmethod + def bulk_species(): + return "mhd" - @property - def velocity_scale(self): + @staticmethod + def velocity_scale(): return "alfvén" - def allocate_helpers(self): + @staticmethod + def propagators_dct(): + return {propagators_fields.ShearAlfven: ["mhd_u2", "b2"]} + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + from struphy.polar.basic import PolarVector + + # extract necessary parameters + alfven_solver = params["fluid"]["mhd"]["options"]["ShearAlfven"]["solver"] + alfven_algo = params["fluid"]["mhd"]["options"]["ShearAlfven"]["algo"] + # project background magnetic field (2-form) and pressure (3-form) self._b_eq = self.derham.P["2"]( [ self.equil.b2_1, self.equil.b2_2, self.equil.b2_3, - ], + ] ) - # temporary vectors for scalar quantities - self._tmp_b1 = self.derham.Vh["2"].zeros() - self._tmp_b2 = self.derham.Vh["2"].zeros() - - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. 
instantiate all species - self.em_fields = self.EMFields() - self.mhd = self.MHD() + # set keyword arguments for propagators + self._kwargs[propagators_fields.ShearAlfven] = { + "u_space": "Hdiv", + "solver": alfven_solver, + "algo": alfven_algo, + } - # 2. instantiate all propagators - self.propagators = self.Propagators() - - # 3. assign variables to propagators - self.propagators.shear_alf.variables.u = self.mhd.velocity - self.propagators.shear_alf.variables.b = self.em_fields.b_field + # Initialize propagators used in splitting substeps + self.init_propagators() # Scalar variables to be saved during simulation + # self.add_scalar('en_U') + # self.add_scalar('en_B') + # self.add_scalar('en_B_eq') + # self.add_scalar('en_B_tot') self.add_scalar("en_tot") self.add_scalar("en_U", compute="from_field") @@ -369,13 +402,14 @@ def __init__(self): self.add_scalar("en_B_tot", compute="from_field") self.add_scalar("en_tot2", summands=["en_U", "en_B", "en_B_eq"]) + # temporary vectors for scalar quantities + self._tmp_b1 = self.derham.Vh["2"].zeros() + self._tmp_b2 = self.derham.Vh["2"].zeros() + def update_scalar_quantities(self): # perturbed fields - en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.mhd.velocity.spline.vector, self.mhd.velocity.spline.vector) - en_B = 0.5 * self.mass_ops.M2.dot_inner( - self.em_fields.b_field.spline.vector, - self.em_fields.b_field.spline.vector, - ) + en_U = 0.5 * self.mass_ops.M2n.dot_inner(self.pointer["mhd_u2"], self.pointer["mhd_u2"]) + en_B = 0.5 * self.mass_ops.M2.dot_inner(self.pointer["b2"], self.pointer["b2"]) self.update_scalar("en_U", en_U) self.update_scalar("en_B", en_B) @@ -388,7 +422,7 @@ def update_scalar_quantities(self): # total magnetic field self._b_eq.copy(out=self._tmp_b1) - self._tmp_b1 += self.em_fields.b_field.spline.vector + self._tmp_b1 += self.pointer["b2"] self.mass_ops.M2.dot(self._tmp_b1, apply_bc=False, out=self._tmp_b2) en_Btot = self._tmp_b1.inner(self._tmp_b2) / 2 @@ -421,74 +455,77 @@ class VariationalPressurelessFluid(StruphyModel): :ref:`Model info `: """ - ## species - - class Fluid(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - - ## abstract methods + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["fluid"]["fluid"] = {"rho3": "L2", "uv": "H1vec"} + return dct - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def bulk_species(): + return "fluid" - # 1. instantiate all species - self.fluid = self.Fluid() - - # 2. instantiate all propagators - self.propagators = self.Propagators() + @staticmethod + def velocity_scale(): + return "alfvén" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.fluid.density - self.propagators.variat_dens.variables.u = self.fluid.velocity - self.propagators.variat_mom.variables.u = self.fluid.velocity + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["fluid_rho3", "fluid_uv"], + propagators_fields.VariationalMomentumAdvection: ["fluid_uv"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.mass import WeightedMassOperator + from struphy.feec.variational_utilities import H1vecMassMatrix_density + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + # Initialize mass matrix + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + + gamma = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": "pressureless", + "gamma": gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("en_U") - @property - def bulk_species(self): - return self.fluid - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - pass - def update_scalar_quantities(self): - u = self.fluid.velocity.spline.vector - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["fluid_uv"], self.pointer["fluid_uv"]) self.update_scalar("en_U", en_U) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='pressureless')\n", - ] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class VariationalBarotropicFluid(StruphyModel): r"""Barotropic fluid equations discretized with a variational method. 
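The declarative head used throughout this refactor (VariationalPressurelessFluid above, VariationalBarotropicFluid below, and every other model in the diff) follows one fixed recipe: static methods declare the species, bulk species, velocity scale and propagators, and the class attributes are derived from them at class-creation time. A minimal sketch of that recipe; MyModel is a hypothetical placeholder, and the "phi"/ImplicitDiffusion pairing is borrowed from the Poisson model further down:

    from struphy.models.base import StruphyModel
    from struphy.propagators import propagators_fields


    class MyModel(StruphyModel):
        @staticmethod
        def species():
            # one dict per category; leaf values name the FEEC space of each variable
            dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
            dct["em_fields"]["phi"] = "H1"
            return dct

        @staticmethod
        def bulk_species():
            return None  # this toy example has no bulk species

        @staticmethod
        def velocity_scale():
            return None

        @staticmethod
        def propagators_dct():
            # propagator class -> list of the variable names it evolves
            return {propagators_fields.ImplicitDiffusion: ["phi"]}

        __em_fields__ = species()["em_fields"]
        __fluid_species__ = species()["fluid"]
        __kinetic_species__ = species()["kinetic"]
        __bulk_species__ = bulk_species()
        __velocity_scale__ = velocity_scale()
        __propagators__ = [prop.__name__ for prop in propagators_dct()]

The model's __init__ then fills self._kwargs[<propagator class>] from the parameter file and calls self.init_propagators(), exactly as the hunks in this file do. Note that calling species() inside the class body relies on staticmethod objects being callable at class-creation time (Python 3.10+), the same assumption the diff itself makes.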
@@ -517,84 +554,84 @@ class VariationalBarotropicFluid(StruphyModel): :ref:`Model info `: """ - ## species - - class Fluid(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - - ## abstract methods + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["fluid"]["fluid"] = {"rho3": "L2", "uv": "H1vec"} + return dct - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def bulk_species(): + return "fluid" - # 1. instantiate all species - self.fluid = self.Fluid() - - # 2. instantiate all propagators - self.propagators = self.Propagators() + @staticmethod + def velocity_scale(): + return "alfvén" - # 3. assign variables to propagators - self.propagators.variat_dens.variables.rho = self.fluid.density - self.propagators.variat_dens.variables.u = self.fluid.velocity - self.propagators.variat_mom.variables.u = self.fluid.velocity + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["fluid_rho3", "fluid_uv"], + propagators_fields.VariationalMomentumAdvection: ["fluid_uv"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.variational_utilities import H1vecMassMatrix_density + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + # Initialize mass matrix + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + + gamma = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": "barotropic", + "gamma": gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_tot") - @property - def bulk_species(self): - return self.fluid - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): - pass - def update_scalar_quantities(self): - rho = 
self.fluid.density.spline.vector - u = self.fluid.velocity.spline.vector - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["fluid_uv"], self.pointer["fluid_uv"]) self.update_scalar("en_U", en_U) - en_thermo = 0.5 * self.mass_ops.M3.dot_inner(rho, rho) + en_thermo = 0.5 * self.mass_ops.M3.dot_inner(self.pointer["fluid_rho3"], self.pointer["fluid_rho3"]) self.update_scalar("en_thermo", en_thermo) en_tot = en_U + en_thermo self.update_scalar("en_tot", en_tot) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='barotropic')\n", - ] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class VariationalCompressibleFluid(StruphyModel): r"""Fully compressible fluid equations discretized with a variational method. @@ -626,71 +663,106 @@ class VariationalCompressibleFluid(StruphyModel): :ref:`Model info `: """ - ## species - - class Fluid(FluidSpecies): - def __init__(self): - self.density = FEECVariable(space="L2") - self.velocity = FEECVariable(space="H1vec") - self.entropy = FEECVariable(space="L2") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.variat_dens = propagators_fields.VariationalDensityEvolve() - self.variat_mom = propagators_fields.VariationalMomentumAdvection() - self.variat_ent = propagators_fields.VariationalEntropyEvolve() - - ## abstract methods + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} + dct["fluid"]["fluid"] = {"rho3": "L2", "s3": "L2", "uv": "H1vec"} + return dct - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def bulk_species(): + return "fluid" - # 1. instantiate all species - self.fluid = self.Fluid() - - # 2. instantiate all propagators - self.propagators = self.Propagators() + @staticmethod + def velocity_scale(): + return "alfvén" - # 3. 
assign variables to propagators - self.propagators.variat_dens.variables.rho = self.fluid.density - self.propagators.variat_dens.variables.u = self.fluid.velocity - self.propagators.variat_mom.variables.u = self.fluid.velocity - self.propagators.variat_ent.variables.s = self.fluid.entropy - self.propagators.variat_ent.variables.u = self.fluid.velocity + @staticmethod + def propagators_dct(): + return { + propagators_fields.VariationalDensityEvolve: ["fluid_rho3", "fluid_uv"], + propagators_fields.VariationalMomentumAdvection: ["fluid_uv"], + propagators_fields.VariationalEntropyEvolve: ["fluid_s3", "fluid_uv"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import H1vecMassMatrix_density + + # initialize base class + super().__init__(params, comm=comm, clone_config=clone_config) + + # Initialize mass matrix + self.WMM = H1vecMassMatrix_density(self.derham, self.mass_ops, self.domain) + + # Initialize propagators/integrators used in splitting substeps + lin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["lin_solver"] + nonlin_solver_momentum = params["fluid"]["fluid"]["options"]["VariationalMomentumAdvection"]["nonlin_solver"] + lin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["lin_solver"] + nonlin_solver_density = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["nonlin_solver"] + lin_solver_entropy = params["fluid"]["fluid"]["options"]["VariationalEntropyEvolve"]["lin_solver"] + nonlin_solver_entropy = params["fluid"]["fluid"]["options"]["VariationalEntropyEvolve"]["nonlin_solver"] + + self._gamma = params["fluid"]["fluid"]["options"]["VariationalDensityEvolve"]["physics"]["gamma"] + model = "full" + + from struphy.feec.variational_utilities import InternalEnergyEvaluator + + self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) + + # set keyword arguments for propagators + self._kwargs[propagators_fields.VariationalDensityEvolve] = { + "model": model, + "s": self.pointer["fluid_s3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_density, + "nonlin_solver": nonlin_solver_density, + "energy_evaluator": self._energy_evaluator, + } + + self._kwargs[propagators_fields.VariationalMomentumAdvection] = { + "mass_ops": self.WMM, + "lin_solver": lin_solver_momentum, + "nonlin_solver": nonlin_solver_momentum, + } + + self._kwargs[propagators_fields.VariationalEntropyEvolve] = { + "model": model, + "rho": self.pointer["fluid_rho3"], + "gamma": self._gamma, + "mass_ops": self.WMM, + "lin_solver": lin_solver_entropy, + "nonlin_solver": nonlin_solver_entropy, + "energy_evaluator": self._energy_evaluator, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() - # define scalars for update_scalar_quantities + # Scalar variables to be saved during simulation self.add_scalar("en_U") self.add_scalar("en_thermo") self.add_scalar("en_tot") - @property - def bulk_species(self): - return self.fluid - - @property - def velocity_scale(self): - return "alfvén" - - def allocate_helpers(self): + # temporary vectors for scalar quantities projV3 = L2Projector("L2", 
self._mass_ops) def f(e1, e2, e3): return 1 - f = xp.vectorize(f) + f = np.vectorize(f) self._integrator = projV3(f) - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self.propagators.variat_ent.options.gamma) - def update_scalar_quantities(self): - rho = self.fluid.density.spline.vector - u = self.fluid.velocity.spline.vector - - en_U = 0.5 * self.mass_ops.WMM.massop.dot_inner(u, u) + en_U = 0.5 * self.WMM.massop.dot_inner(self.pointer["fluid_uv"], self.pointer["fluid_uv"]) self.update_scalar("en_U", en_U) en_thermo = self.update_thermo_energy() @@ -698,45 +770,15 @@ def update_scalar_quantities(self): en_tot = en_U + en_thermo self.update_scalar("en_tot", en_tot) - # default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "variat_dens.Options" in line: - new_file += [ - "model.propagators.variat_dens.options = model.propagators.variat_dens.Options(model='full',\n", - ] - new_file += [ - " s=model.fluid.entropy)\n", - ] - elif "variat_ent.Options" in line: - new_file += [ - "model.propagators.variat_ent.options = model.propagators.variat_ent.Options(model='full',\n", - ] - new_file += [ - " rho=model.fluid.density)\n", - ] - elif "entropy.add_background" in line: - new_file += ["model.fluid.density.add_background(FieldsBackground())\n"] - new_file += [line] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - def update_thermo_energy(self): """Reuse tmp used in VariationalEntropyEvolve to compute the thermodynamic energy. :meta private: """ - en_prop = self.propagators.variat_ent + en_prop = self._propagators[2] - self._energy_evaluator.sf.vector = self.fluid.entropy.spline.vector - self._energy_evaluator.rhof.vector = self.fluid.density.spline.vector + self._energy_evaluator.sf.vector = self.pointer["fluid_s3"] + self._energy_evaluator.rhof.vector = self.pointer["fluid_rho3"] sf_values = self._energy_evaluator.sf.eval_tp_fixed_loc( self._energy_evaluator.integration_grid_spans, self._energy_evaluator.integration_grid_bd, @@ -757,7 +799,7 @@ def update_thermo_energy(self): def __ener(self, rho, s): """Thermodynamic energy as a function of rho and s, using the perfect gas hypothesis E(rho, s) = rho^gamma*exp(s/rho)""" - return xp.power(rho, self.propagators.variat_ent.options.gamma) * xp.exp(s / rho) + return np.power(rho, self._gamma) * np.exp(s / rho) class Poisson(StruphyModel): @@ -788,89 +830,65 @@ :ref:`Model info `: """ - ## species - - class EMFields(FieldSpecies): - def __init__(self): - self.phi = FEECVariable(space="H1") - self.source = FEECVariable(space="H1") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.source = propagators_fields.TimeDependentSource() - self.poisson = propagators_fields.Poisson() - - ## abstract methods - - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def species(): dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - # 1. instantiate all species - self.em_fields = self.EMFields() + dct["em_fields"]["phi"] = "H1" + dct["em_fields"]["source"] = "H1" + return dct - # 2. instantiate all propagators - self.propagators = self.Propagators() - - # 3.
assign variables to propagators - self.propagators.source.variables.source = self.em_fields.source - self.propagators.poisson.variables.phi = self.em_fields.phi - - @property - def bulk_species(self): + @staticmethod + def bulk_species(): return None - @property - def velocity_scale(self): + @staticmethod + def velocity_scale(): return None - def allocate_helpers(self): - pass + @staticmethod + def propagators_dct(): + return { + propagators_fields.TimeDependentSource: ["source"], + propagators_fields.ImplicitDiffusion: ["phi"], + } + + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] + + def __init__(self, params, comm, clone_config=None): + super().__init__(params, comm=comm, clone_config=clone_config) + + # extract necessary parameters + model_params = params["em_fields"]["options"]["ImplicitDiffusion"]["model"] + solver_params = params["em_fields"]["options"]["ImplicitDiffusion"]["solver"] + omega = params["em_fields"]["options"]["TimeDependentSource"]["omega"] + hfun = params["em_fields"]["options"]["TimeDependentSource"]["hfun"] + + # set keyword arguments for propagators + self._kwargs[propagators_fields.TimeDependentSource] = { + "omega": omega, + "hfun": hfun, + } + + self._kwargs[propagators_fields.ImplicitDiffusion] = { + "sigma_1": model_params["sigma_1"], + "stab_mat": model_params["stab_mat"], + "diffusion_mat": model_params["diffusion_mat"], + "rho": self.pointer["source"], + "solver": solver_params, + } + + # Initialize propagators used in splitting substeps + self.init_propagators() def update_scalar_quantities(self): pass - def allocate_propagators(self): - """Solve initial Poisson equation. - - :meta private: - """ - - # initialize fields and particles - super().allocate_propagators() - - # # use setter to assign source - # self.propagators.poisson.rho = self.mass_ops.M0.dot(self.em_fields.source.spline.vector) - - # Solve with dt=1. 
 
 
 class DeterministicParticleDiffusion(StruphyModel):
     r"""Diffusion equation discretized with a deterministic particle method;
@@ -898,49 +916,60 @@ class DeterministicParticleDiffusion(StruphyModel):
     :ref:`Model info `:
     """
 
-    ## species
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
 
-    class Hydrogen(ParticleSpecies):
-        def __init__(self):
-            self.var = PICVariable(space="Particles3D")
-            self.init_variables()
+        dct["kinetic"]["species1"] = "Particles3D"
+        return dct
 
-    ## propagators
+    @staticmethod
+    def bulk_species():
+        return "species1"
 
-    class Propagators:
-        def __init__(self):
-            self.det_diff = propagators_markers.PushDeterministicDiffusion()
+    @staticmethod
+    def velocity_scale():
+        return None
 
-    ## abstract methods
+    @staticmethod
+    def propagators_dct():
+        return {propagators_markers.PushDeterministicDiffusion: ["species1"]}
 
-    def __init__(self):
-        if rank == 0:
-            print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':")
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
 
-        # 1. instantiate all species
-        self.hydrogen = self.Hydrogen()
+    def __init__(self, params, comm, clone_config=None):
+        super().__init__(params, comm=comm, clone_config=clone_config)
 
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators()
+        # prelim
+        params = self.kinetic["species1"]["params"]
+        algo = params["options"]["PushDeterministicDiffusion"]["algo"]
+        diffusion_coefficient = params["options"]["PushDeterministicDiffusion"]["diffusion_coefficient"]
 
-        # 3.
assign variables to propagators - self.propagators.det_diff.variables.var = self.hydrogen.var + # # project magnetic background + # self._b_eq = self.derham.P['2']([self.equil.b2_1, + # self.equil.b2_2, + # self.equil.b2_3]) - # define scalars for update_scalar_quantities - # self.add_scalar("electric energy") - # self.add_scalar("magnetic energy") - # self.add_scalar("total energy") + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushDeterministicDiffusion] = { + "algo": algo, + "bc_type": params["markers"]["bc"], + "diffusion_coefficient": diffusion_coefficient, + } - @property - def bulk_species(self): - return self.hydrogen + # Initialize propagators used in splitting substeps + self.init_propagators() - @property - def velocity_scale(self): - return None + # Scalar variables to be saved during simulation + self.add_scalar("en_f") - def allocate_helpers(self): - pass + # MPI operations needed for scalar variables + self._tmp = np.empty(1, dtype=float) def update_scalar_quantities(self): pass @@ -971,49 +1000,60 @@ class RandomParticleDiffusion(StruphyModel): :ref:`Model info `: """ - ## species + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - class Hydrogen(ParticleSpecies): - def __init__(self): - self.var = PICVariable(space="Particles3D") - self.init_variables() + dct["kinetic"]["species1"] = "Particles3D" + return dct - ## propagators + @staticmethod + def bulk_species(): + return "species1" - class Propagators: - def __init__(self): - self.rand_diff = propagators_markers.PushRandomDiffusion() + @staticmethod + def velocity_scale(): + return None - ## abstract methods + @staticmethod + def propagators_dct(): + return {propagators_markers.PushRandomDiffusion: ["species1"]} - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] - # 1. instantiate all species - self.hydrogen = self.Hydrogen() + def __init__(self, params, comm, clone_config=None): + super().__init__(params, comm=comm, clone_config=clone_config) - # 2. instantiate all propagators - self.propagators = self.Propagators() + # prelim + species1_params = self.kinetic["species1"]["params"] + algo = species1_params["options"]["PushRandomDiffusion"]["algo"] + diffusion_coefficient = species1_params["options"]["PushRandomDiffusion"]["diffusion_coefficient"] - # 3. 
assign variables to propagators - self.propagators.rand_diff.variables.var = self.hydrogen.var + # # project magnetic background + # self._b_eq = self.derham.P['2']([self.equil.b2_1, + # self.equil.b2_2, + # self.equil.b2_3]) - # define scalars for update_scalar_quantities - # self.add_scalar("electric energy") - # self.add_scalar("magnetic energy") - # self.add_scalar("total energy") + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushRandomDiffusion] = { + "algo": algo, + "bc_type": species1_params["markers"]["bc"], + "diffusion_coefficient": diffusion_coefficient, + } - @property - def bulk_species(self): - return self.hydrogen + # Initialize propagators used in splitting substeps + self.init_propagators() - @property - def velocity_scale(self): - return None + # Scalar variables to be saved during simulation + self.add_scalar("en_f") - def allocate_helpers(self): - pass + # MPI operations needed for scalar variables + self._tmp = np.empty(1, dtype=float) def update_scalar_quantities(self): pass @@ -1028,9 +1068,7 @@ class PressureLessSPH(StruphyModel): &\partial_t \rho + \nabla \cdot ( \rho \mathbf u ) = 0 \,, \\[4mm] - &\partial_t (\rho \mathbf u) + \nabla \cdot (\rho \mathbf u \otimes \mathbf u) = - \nabla \phi_0 \,, - - where :math:`\phi_0` is a static external potential. + &\partial_t (\rho \mathbf u) + \nabla \cdot (\rho \mathbf u \otimes \mathbf u) = 0 \,. :ref:`propagators` (called in sequence): @@ -1039,81 +1077,66 @@ class PressureLessSPH(StruphyModel): This is discretized by particles going in straight lines. """ - ## species - - class ColdFluid(ParticleSpecies): - def __init__(self): - self.var = SPHVariable() - self.init_variables() + @staticmethod + def species(): + dct = {"em_fields": {}, "fluid": {}, "kinetic": {}} - ## propagators + dct["kinetic"]["p_fluid"] = "ParticlesSPH" + return dct - class Propagators: - def __init__(self): - self.push_eta = propagators_markers.PushEta() - self.push_v = propagators_markers.PushVinEfield() + @staticmethod + def bulk_species(): + return "p_fluid" - ## abstract methods - - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") + @staticmethod + def velocity_scale(): + return None - # 1. instantiate all species - self.cold_fluid = self.ColdFluid() + @staticmethod + def diagnostics_dct(): + dct = {} + dct["projected_density"] = "L2" + return dct - # 2. instantiate all propagators - self.propagators = self.Propagators() + @staticmethod + def propagators_dct(): + return {propagators_markers.PushEta: ["p_fluid"]} - # 3. 
assign variables to propagators - self.propagators.push_eta.variables.var = self.cold_fluid.var - self.propagators.push_v.variables.var = self.cold_fluid.var + __em_fields__ = species()["em_fields"] + __fluid_species__ = species()["fluid"] + __kinetic_species__ = species()["kinetic"] + __bulk_species__ = bulk_species() + __velocity_scale__ = velocity_scale() + __propagators__ = [prop.__name__ for prop in propagators_dct()] - # define scalars for update_scalar_quantities - self.add_scalar("en_kin", compute="from_particles", variable=self.cold_fluid.var) + def __init__(self, params, comm, clone_config=None): + super().__init__(params, comm=comm, clone_config=clone_config) - @property - def bulk_species(self): - return self.cold_fluid + # prelim + p_fluid_params = self.kinetic["p_fluid"]["params"] + algo_eta = params["kinetic"]["p_fluid"]["options"]["PushEta"]["algo"] - @property - def velocity_scale(self): - return None + # set keyword arguments for propagators + self._kwargs[propagators_markers.PushEta] = { + "algo": algo_eta, + "density_field": self.pointer["projected_density"], + } - # @staticmethod - # def diagnostics_dct(): - # dct = {} - # dct["projected_density"] = "L2" - # return dct + # Initialize propagators used in splitting substeps + self.init_propagators() - def allocate_helpers(self): - pass + # Scalar variables to be saved during simulation + self.add_scalar("en_kin", compute="from_particles", species="p_fluid") def update_scalar_quantities(self): - particles = self.cold_fluid.var.particles - valid_parts = particles.markers_wo_holes_and_ghost - en_kin = valid_parts[:, 6].dot(valid_parts[:, 3] ** 2 + valid_parts[:, 4] ** 2 + valid_parts[:, 5] ** 2) / ( - 2.0 * particles.Np - ) + en_kin = self.pointer["p_fluid"].markers_wo_holes_and_ghost[:, 6].dot( + self.pointer["p_fluid"].markers_wo_holes_and_ghost[:, 3] ** 2 + + self.pointer["p_fluid"].markers_wo_holes_and_ghost[:, 4] ** 2 + + self.pointer["p_fluid"].markers_wo_holes_and_ghost[:, 5] ** 2 + ) / (2.0 * self.pointer["p_fluid"].Np) self.update_scalar("en_kin", en_kin) - ## default parameters - def generate_default_parameter_file(self, path=None, prompt=True): - params_path = super().generate_default_parameter_file(path=path, prompt=prompt) - new_file = [] - with open(params_path, "r") as f: - for line in f: - if "push_v.Options" in line: - new_file += ["phi = equil.p0\n"] - new_file += ["model.propagators.push_v.options = model.propagators.push_v.Options(phi=phi)\n"] - else: - new_file += [line] - - with open(params_path, "w") as f: - for line in new_file: - f.write(line) - class TwoFluidQuasiNeutralToy(StruphyModel): r"""Linearized, quasi-neutral two-fluid model with zero electron inertia. @@ -1153,75 +1176,115 @@ class TwoFluidQuasiNeutralToy(StruphyModel): in plasma physics, Journal of Computational Physics 2018. """ - ## species - - class EMfields(FieldSpecies): - def __init__(self): - self.phi = FEECVariable(space="L2") - self.init_variables() - - class Ions(FluidSpecies): - def __init__(self): - self.u = FEECVariable(space="Hdiv") - self.init_variables() - - class Electrons(FluidSpecies): - def __init__(self): - self.u = FEECVariable(space="Hdiv") - self.init_variables() - - ## propagators - - class Propagators: - def __init__(self): - self.qn_full = propagators_fields.TwoFluidQuasiNeutralFull() - - ## abstract methods - - def __init__(self): - if rank == 0: - print(f"\n*** Creating light-weight instance of model '{self.__class__.__name__}':") - - # 1. 
instantiate all species
-        self.em_fields = self.EMfields()
-        self.ions = self.Ions()
-        self.electrons = self.Electrons()
-
-        # 2. instantiate all propagators
-        self.propagators = self.Propagators()
+    @staticmethod
+    def species():
+        dct = {"em_fields": {}, "fluid": {}, "kinetic": {}}
+
+        dct["em_fields"]["potential"] = "L2"
+        dct["fluid"]["ions"] = {
+            "u": "Hdiv",
+        }
+        dct["fluid"]["electrons"] = {
+            "u": "Hdiv",
+        }
+        return dct
+
+    @staticmethod
+    def bulk_species():
+        return "ions"
+
+    @staticmethod
+    def velocity_scale():
+        return "thermal"
 
-        # 3. assign variables to propagators
-        self.propagators.qn_full.variables.u = self.ions.u
-        self.propagators.qn_full.variables.ue = self.electrons.u
-        self.propagators.qn_full.variables.phi = self.em_fields.phi
+    @staticmethod
+    def propagators_dct():
+        return {propagators_fields.TwoFluidQuasiNeutralFull: ["ions_u", "electrons_u", "potential"]}
+
+    __em_fields__ = species()["em_fields"]
+    __fluid_species__ = species()["fluid"]
+    __kinetic_species__ = species()["kinetic"]
+    __bulk_species__ = bulk_species()
+    __velocity_scale__ = velocity_scale()
+    __propagators__ = [prop.__name__ for prop in propagators_dct()]
+
+    # add special options
+    @classmethod
+    def options(cls):
+        dct = super().options()
+        cls.add_option(
+            species=["fluid", "electrons"],
+            option=propagators_fields.TwoFluidQuasiNeutralFull,
+            dct=dct,
+        )
+        return dct
 
-        # define scalars for update_scalar_quantities
+    def __init__(self, params, comm, clone_config=None):
+        super().__init__(params, comm=comm, clone_config=clone_config)
 
-    @property
-    def bulk_species(self):
-        return self.ions
+        # get species parameters
+        electrons_params = params["fluid"]["electrons"]
 
-    @property
-    def velocity_scale(self):
-        return "thermal"
-
-    def allocate_helpers(self):
-        pass
+        # Get coupling strength
+        if electrons_params["options"]["TwoFluidQuasiNeutralFull"]["override_eq_params"]:
+            self._epsilon = electrons_params["options"]["TwoFluidQuasiNeutralFull"]["eps_norm"]
+            print(
+                f"\n!!! Override equation parameters: {self._epsilon = }.",
+            )
+        else:
+            self._epsilon = self.equation_params["electrons"]["epsilon"]
+
+        # extract necessary parameters
+        stokes_solver = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["solver"]
+        stokes_nu = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["nu"]
+        stokes_nu_e = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["nu_e"]
+        stokes_a = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["a"]
+        stokes_R0 = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["R0"]
+        stokes_B0 = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["B0"]
+        stokes_Bp = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["Bp"]
+        stokes_alpha = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["alpha"]
+        stokes_beta = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["beta"]
+        stokes_sigma = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["stab_sigma"]
+        stokes_variant = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["variant"]
+        stokes_method_to_solve = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["method_to_solve"]
+        stokes_preconditioner = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["preconditioner"]
+        stokes_spectralanalysis = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"][
+            "spectralanalysis"
+        ]
+        stokes_lifting = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["lifting"]
+        stokes_dimension = params["fluid"]["electrons"]["options"]["TwoFluidQuasiNeutralFull"]["dimension"]
+        stokes_1D_dt = params["time"]["dt"]
+
+        # Check MPI size to ensure only one MPI process
+        if comm is not None and stokes_variant == "Uzawa":
+            if comm.Get_rank() == 0:
+                print("Error: TwoFluidQuasiNeutralToy only runs with one MPI process.")
+            return  # Early return to stop execution for multiple MPI processes
+
+        # set keyword arguments for propagators
+        self._kwargs[propagators_fields.TwoFluidQuasiNeutralFull] = {
+            "solver": stokes_solver,
+            "nu": stokes_nu,
+            "nu_e": stokes_nu_e,
+            "eps_norm": self._epsilon,
+            "a": stokes_a,
+            "R0": stokes_R0,
+            "B0": stokes_B0,
+            "Bp": stokes_Bp,
+            "alpha": stokes_alpha,
+            "beta": stokes_beta,
+            "stab_sigma": stokes_sigma,
+            "variant": stokes_variant,
+            "method_to_solve": stokes_method_to_solve,
+            "preconditioner": stokes_preconditioner,
+            "spectralanalysis": stokes_spectralanalysis,
+            "dimension": stokes_dimension,
+            "D1_dt": stokes_1D_dt,
+            "lifting": stokes_lifting,
+        }
+
+        # Initialize propagators used in splitting substeps
+        self.init_propagators()
 
     def update_scalar_quantities(self):
         pass
-
-    ## default parameters
-    def generate_default_parameter_file(self, path=None, prompt=True):
-        params_path = super().generate_default_parameter_file(path=path, prompt=prompt)
-        new_file = []
-        with open(params_path, "r") as f:
-            for line in f:
-                if "BaseUnits()" in line:
-                    new_file += ["base_units = BaseUnits(kBT=1.0)\n"]
-                else:
-                    new_file += [line]
-
-        with open(params_path, "w") as f:
-            for line in new_file:
-                f.write(line)
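The ODE-solver changes below replace the `butcher: ButcherTableau` constructor argument by a plain `algo` string, from which the tableau is built internally. For reference, here is how an explicit s-stage Runge-Kutta method consumes a Butcher tableau (a, b, c); a plain-numpy sketch using the rk4 coefficients from `ButcherTableau`, not the `ODEsolverFEEC` implementation:

# Hedged sketch: a generic explicit RK step driven by a Butcher tableau.
import numpy as np

a = np.array([[0.0, 0.0, 0.0, 0.0],
              [0.5, 0.0, 0.0, 0.0],
              [0.0, 0.5, 0.0, 0.0],
              [0.0, 0.0, 1.0, 0.0]])
b = np.array([1 / 6, 1 / 3, 1 / 3, 1 / 6])
c = np.array([0.0, 0.5, 0.5, 1.0])


def rk_step(f, t, y, h):
    """One explicit RK step: k_i = f(t + c_i*h, y + h*sum_j a_ij*k_j)."""
    k = np.zeros(len(b))
    for i in range(len(b)):
        k[i] = f(t + c[i] * h, y + h * np.dot(a[i, :i], k[:i]))
    return y + h * np.dot(b, k)


# dy/dt = omega*y, as in the exponential-growth test of test_ode_feec.py
omega, y, h = 2.3, 1.2, 0.1
for n in range(10):
    y = rk_step(lambda t, yy: omega * yy, h * n, y, h)
assert abs(y - 1.2 * np.exp(omega)) < 1e-3  # 4th-order accurate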
diff --git a/src/struphy/ode/solvers.py b/src/struphy/ode/solvers.py
index c6d6366b9..414afd8e0 100644
--- a/src/struphy/ode/solvers.py
+++ b/src/struphy/ode/solvers.py
@@ -1,10 +1,10 @@
 from inspect import signature
 
-import cunumpy as xp
 from psydac.linalg.block import BlockVector
 from psydac.linalg.stencil import StencilVector
 
 from struphy.ode.utils import ButcherTableau
+from struphy.utils.arrays import xp as np
 
 
 class ODEsolverFEEC:
@@ -31,10 +31,10 @@ class ODEsolverFEEC:
     def __init__(
         self,
         vector_field: dict,
-        butcher: ButcherTableau = ButcherTableau(),
+        algo: str = "rk4",
     ):
         # get algorithm
-        self._butcher = butcher
+        self._butcher = ButcherTableau(algo=algo)
 
         # check arguments and allocate k for each stage
         self._k = {}
@@ -51,6 +51,7 @@ def __init__(
             self._k[vec] += [vec.space.zeros()]
 
         self._vector_field = vector_field
+        self._algo = algo
 
         # collect unknowns in list
         self._y = list(self.vector_field.keys())
@@ -95,6 +96,11 @@ def vector_field(self):
         values are callables representing the respective component of the vector field."""
         return self._vector_field
 
+    @property
+    def algo(self):
+        """See :class:`~struphy.ode.utils.ButcherTableau` for available algorithms."""
+        return self._algo
+
     @property
     def y(self):
         """List of variables to be updated."""
diff --git a/src/struphy/ode/tests/test_ode_feec.py b/src/struphy/ode/tests/test_ode_feec.py
index 7dfa87a46..4c3db40ae 100644
--- a/src/struphy/ode/tests/test_ode_feec.py
+++ b/src/struphy/ode/tests/test_ode_feec.py
@@ -1,8 +1,6 @@
-from typing import get_args
-
 import pytest
 
-from struphy.ode.utils import OptsButcher
+from struphy.ode.utils import ButcherTableau
 
 
 @pytest.mark.parametrize(
@@ -15,12 +13,11 @@
         ("1", "0", "2"),
     ],
 )
-@pytest.mark.parametrize("algo", get_args(OptsButcher))
+@pytest.mark.parametrize("algo", ButcherTableau.available_methods())
 def test_exp_growth(spaces, algo, show_plots=False):
     """Solve dy/dt = omega*y for different feec variables y
     and with all available solvers from the ButcherTableau."""
 
-    import cunumpy as xp
     from matplotlib import pyplot as plt
     from psydac.ddm.mpi import mpi as MPI
     from psydac.linalg.block import BlockVector
@@ -28,7 +25,7 @@ def test_exp_growth(spaces, algo, show_plots=False):
 
     from struphy.feec.psydac_derham import Derham
     from struphy.ode.solvers import ODEsolverFEEC
-    from struphy.ode.utils import ButcherTableau
+    from struphy.utils.arrays import xp as np
 
     comm = MPI.COMM_WORLD
     rank = comm.Get_rank()
@@ -40,7 +37,7 @@
     c0 = 1.2
     omega = 2.3
 
-    y_exact = lambda t: c0 * xp.exp(omega * t)
+    y_exact = lambda t: c0 * np.exp(omega * t)
 
     vector_field = {}
     for i, space in enumerate(spaces):
@@ -100,11 +97,10 @@ def f(t, y1, y2, y3, out=out):
 
         vector_field[var] = f
 
-    print(f"{vector_field =}")
-    butcher = ButcherTableau(algo=algo)
-    print(f"{butcher =}")
+    print(f"{vector_field = }")
+    print(f"{algo = }")
 
-    solver = ODEsolverFEEC(vector_field, butcher=butcher)
+    solver = ODEsolverFEEC(vector_field, algo=algo)
 
     hs = [0.1]
     n_hs = 6
@@ -117,8 +113,8 @@
     errors = {}
     for i, h in enumerate(hs):
         errors[h] = {}
-        time = xp.linspace(0, Tend, int(Tend / h) + 1)
-        print(f"{h =}, {time.size =}")
+        time = np.linspace(0, Tend, int(Tend / h) + 1)
+        print(f"{h = }, {time.size = }")
         yvec = y_exact(time)
         ymax = {}
         for var in vector_field:
@@ -129,17 +125,17 @@
             for b in var.blocks:
                 b[:] = c0
             var.update_ghost_regions()
-            ymax[var] = c0 * xp.ones_like(time)
+            ymax[var] = c0 * np.ones_like(time)
 
         for n in range(time.size - 1):
            tn = h * n
            solver(tn, h)
            for var in vector_field:
-                ymax[var][n + 1] = xp.max(var.toarray())
+                ymax[var][n + 1] = np.max(var.toarray())
 
         # checks
         for var in vector_field:
-            errors[h][var] = h * xp.sum(xp.abs(yvec - ymax[var])) / (h * xp.sum(xp.abs(yvec)))
-            print(f"{errors[h][var] =}")
+            errors[h][var] = h * np.sum(np.abs(yvec - ymax[var])) / (h * np.sum(np.abs(yvec)))
+            print(f"{errors[h][var] = }")
             assert errors[h][var] < 0.31
 
     if rank == 0:
@@ -161,16 +157,16 @@
                 h_vec += [h]
                 err_vec += [dct[var]]
 
-            m, _ = xp.polyfit(xp.log(h_vec), xp.log(err_vec), deg=1)
-            print(f"{spaces[j]}-space, fitted convergence rate = {m} for {algo =} with {solver.butcher.conv_rate =}")
-            assert xp.abs(m - solver.butcher.conv_rate) < 0.1
-            print(f"Convergence check passed on {rank =}.")
+            m, _ = np.polyfit(np.log(h_vec), np.log(err_vec), deg=1)
+            print(f"{spaces[j]}-space, fitted convergence rate = {m} for {algo = } with {solver.butcher.conv_rate = }")
+            assert np.abs(m - solver.butcher.conv_rate) < 0.1
+            print(f"Convergence check passed on {rank = }.")
 
             if rank == 0:
                 plt.loglog(h_vec, h_vec, "--", label="h")
                 plt.loglog(h_vec, [h**2 for h in h_vec], "--", label="h^2")
                 plt.loglog(h_vec, [h**3 for h in h_vec], "--", label="h^3")
                 plt.loglog(h_vec, [h**4 for h in h_vec], "--", label="h^4")
                 plt.loglog(h_vec, err_vec, "o-k", label=f"{spaces[j]}-space, {algo}")
     if rank == 0:
         plt.xlabel("log(h)")
diff --git a/src/struphy/ode/utils.py b/src/struphy/ode/utils.py
index 6748d07f1..bdabf4406 100644
--- a/src/struphy/ode/utils.py
+++ b/src/struphy/ode/utils.py
@@ -1,19 +1,6 @@
-from dataclasses import dataclass
-from typing import Literal, get_args
+from struphy.utils.arrays import xp as np
 
-import cunumpy as xp
 
-OptsButcher = Literal[
-    "rk4",
-    "forward_euler",
-    "heun2",
-    "rk2",
-    "heun3",
-    "3/8 rule",
-]
-
-
-@dataclass
 class ButcherTableau:
     r"""
     Butcher tableau for explicit s-stage Runge-Kutta methods.
@@ -26,62 +13,72 @@ class ButcherTableau:
 
     Parameters
    ----------
-    algo : OptsButcher
+    algo : str
        Name of the RK method.
""" - algo: OptsButcher = "rk4" - - def __post_init__(self): + @staticmethod + def available_methods(): + meth_avail = [ + "rk4", + "forward_euler", + "heun2", + "rk2", + "heun3", + "3/8 rule", + ] + return meth_avail + + def __init__(self, algo: str = "rk4"): # choose algorithm - if self.algo == "forward_euler": + if algo == "forward_euler": a = () b = (1.0,) c = (0.0,) conv_rate = 1 - elif self.algo == "heun2": + elif algo == "heun2": a = ((1.0,),) b = (1 / 2, 1 / 2) c = (0.0, 1.0) conv_rate = 2 - elif self.algo == "rk2": + elif algo == "rk2": a = ((1 / 2,),) b = (0.0, 1.0) c = (0.0, 1 / 2) conv_rate = 2 - elif self.algo == "heun3": + elif algo == "heun3": a = ((1 / 3,), (0.0, 2 / 3)) b = (1 / 4, 0.0, 3 / 4) c = (0.0, 1 / 3, 2 / 3) conv_rate = 3 - elif self.algo == "rk4": + elif algo == "rk4": a = ((1 / 2,), (0.0, 1 / 2), (0.0, 0.0, 1.0)) b = (1 / 6, 1 / 3, 1 / 3, 1 / 6) c = (0.0, 1 / 2, 1 / 2, 1.0) conv_rate = 4 - elif self.algo == "3/8 rule": + elif algo == "3/8 rule": a = ((1 / 3,), (-1 / 3, 1.0), (1.0, -1.0, 1.0)) b = (1 / 8, 3 / 8, 3 / 8, 1 / 8) c = (0.0, 1 / 3, 2 / 3, 1.0) conv_rate = 4 else: - raise NotImplementedError(f"Chosen algorithm {self.algo} is not implemented.") + raise NotImplementedError("Chosen algorithm is not implemented.") - self._b = xp.array(b) - self._c = xp.array(c) + self._b = np.array(b) + self._c = np.array(c) assert self._b.size == self._c.size self._n_stages = self._b.size assert len(a) == self.n_stages - 1 - self._a = xp.tri(self.n_stages, k=-1) + self._a = np.tri(self.n_stages, k=-1) for l, st in enumerate(a): assert len(st) == l + 1 self._a[l + 1, : l + 1] = st self._conv_rate = conv_rate - __available_methods__ = get_args(OptsButcher) + __available_methods__ = available_methods() @property def a(self): diff --git a/src/struphy/pic/accumulation/accum_kernels.py b/src/struphy/pic/accumulation/accum_kernels.py index 2a82a9bcf..8d3c2923b 100644 --- a/src/struphy/pic/accumulation/accum_kernels.py +++ b/src/struphy/pic/accumulation/accum_kernels.py @@ -33,6 +33,7 @@ def charge_density_0form( args_derham: "DerhamArguments", args_domain: "DomainArguments", vec: "float[:,:,:]", + vdim: "int", ): r""" Kernel for :class:`~struphy.pic.accumulation.particles_to_grid.AccumulatorVector` into V0 with the filling @@ -44,7 +45,6 @@ def charge_density_0form( markers = args_markers.markers Np = args_markers.Np - weight_idx = args_markers.weight_idx # -- removed omp: #$ omp parallel private (ip, eta1, eta2, eta3, filling) # -- removed omp: #$ omp for reduction ( + :vec) @@ -59,7 +59,7 @@ def charge_density_0form( eta3 = markers[ip, 2] # filling = w_p/N - filling = markers[ip, weight_idx] / Np + filling = markers[ip, 3 + vdim] / Np particle_to_mat_kernels.vec_fill_b_v0( args_derham, @@ -487,6 +487,57 @@ def linear_vlasov_ampere( # -- removed omp: #$ omp end parallel +def vlasov_maxwell_poisson( + args_markers: "MarkerArguments", + args_derham: "DerhamArguments", + args_domain: "DomainArguments", + vec: "float[:,:,:]", +): + r""" + Accumulates the charge density in V0 + + .. math:: + + \rho_p^\mu = w_p \,. + + Parameters + ---------- + + Note + ---- + The above parameter list contains only the model specific input arguments. + """ + + markers = args_markers.markers + Np = args_markers.Np + + # -- removed omp: #$ omp parallel private (ip, eta1, eta2, eta3, filling) + # -- removed omp: #$ omp for reduction ( + :vec) + for ip in range(shape(markers)[0]): + # only do something if particle is a "true" particle (i.e. 
+        if markers[ip, 0] == -1.0:
+            continue
+
+        # marker positions
+        eta1 = markers[ip, 0]
+        eta2 = markers[ip, 1]
+        eta3 = markers[ip, 2]
+
+        # filling = w_p/N
+        filling = markers[ip, 6] / Np
+
+        particle_to_mat_kernels.vec_fill_b_v0(
+            args_derham,
+            eta1,
+            eta2,
+            eta3,
+            vec,
+            filling,
+        )
+
+    # -- removed omp: #$ omp end parallel
+
+
 @stack_array("dfm", "df_inv", "df_inv_t", "g_inv", "v", "df_inv_times_v", "filling_m", "filling_v")
 def vlasov_maxwell(
@@ -1112,7 +1163,9 @@ def pc_lin_mhd_6d_full(
     vec1_3: "float[:,:,:]",
     vec2_3: "float[:,:,:]",
     vec3_3: "float[:,:,:]",
-    ep_scale: "float",
+    scale_mat: "float",
+    scale_vec: "float",
+    boundary_cut: "float",
 ):
     r"""Accumulates into V1 with the filling functions
@@ -1156,6 +1209,10 @@
         if markers[ip, 0] == -1.0:
             continue
 
+        # boundary cut
+        if markers[ip, 0] < boundary_cut or markers[ip, 0] > 1.0 - boundary_cut:
+            continue
+
         # marker positions
         eta1 = markers[ip, 0]
         eta2 = markers[ip, 1]
@@ -1186,8 +1243,8 @@
 
         weight = markers[ip, 8]
 
-        filling_m[:, :] = weight * tmp1 / Np * ep_scale
-        filling_v[:] = weight * tmp_v / Np * ep_scale
+        filling_m[:, :] = weight * tmp1 / Np * scale_mat
+        filling_v[:] = weight * tmp_v / Np * scale_vec
 
         # call the appropriate matvec filler
         particle_to_mat_kernels.m_v_fill_v1_pressure_full(
@@ -1305,7 +1362,9 @@ def pc_lin_mhd_6d(
     vec1_3: "float[:,:,:]",
     vec2_3: "float[:,:,:]",
     vec3_3: "float[:,:,:]",
-    ep_scale: "float",
+    scale_mat: "float",
+    scale_vec: "float",
+    boundary_cut: "float",
 ):
     r"""Accumulates into V1 with the filling functions
@@ -1348,6 +1407,10 @@
         if markers[ip, 0] == -1.0:
             continue
 
+        # boundary cut
+        if markers[ip, 0] < boundary_cut or markers[ip, 0] > 1.0 - boundary_cut:
+            continue
+
         # marker positions
         eta1 = markers[ip, 0]
         eta2 = markers[ip, 1]
@@ -1378,8 +1441,8 @@
         linalg_kernels.matrix_matrix(df_inv, df_inv_t, tmp1)
         linalg_kernels.matrix_vector(df_inv, v, tmp_v)
 
-        filling_m[:, :] = weight * tmp1 * ep_scale
-        filling_v[:] = weight * tmp_v * ep_scale
+        filling_m[:, :] = weight * tmp1 * scale_mat
+        filling_v[:] = weight * tmp_v * scale_vec
 
         # call the appropriate matvec filler
         particle_to_mat_kernels.m_v_fill_v1_pressure(
diff --git a/src/struphy/pic/accumulation/accum_kernels_gc.py b/src/struphy/pic/accumulation/accum_kernels_gc.py
index fecf6a255..628eeeab7 100644
--- a/src/struphy/pic/accumulation/accum_kernels_gc.py
+++ b/src/struphy/pic/accumulation/accum_kernels_gc.py
@@ -8,7 +8,7 @@
 These kernels are passed to :class:`struphy.pic.accumulation.particles_to_grid.Accumulator`.
 """
 
-from numpy import empty, mod, shape, zeros
+from numpy import empty, shape, zeros
 from pyccel.decorators import stack_array
 
 import struphy.bsplines.bsplines_kernels as bsplines_kernels
@@ -67,46 +67,6 @@ def gc_density_0form(
     # -- removed omp: #$ omp end parallel
 
 
-def gc_mag_density_0form(
-    args_markers: "MarkerArguments",
-    args_derham: "DerhamArguments",
-    args_domain: "DomainArguments",
-    vec: "float[:,:,:]",
-    scale: "float",  # model specific argument
-):
-    r"""
-    Kernel for :class:`~struphy.pic.accumulation.particles_to_grid.AccumulatorVector` into V0 with the filling
-
-    .. math::
-
-        B_p^\mu = \mu \frac{w_p}{N} \,.
- """ - - markers = args_markers.markers - Np = args_markers.Np - - # -- removed omp: #$ omp parallel private (ip, eta1, eta2, eta3, filling) - # -- removed omp: #$ omp for reduction ( + :vec) - for ip in range(shape(markers)[0]): - # only do something if particle is a "true" particle (i.e. not a hole) - if markers[ip, 0] == -1.0: - continue - - # marker positions - eta1 = markers[ip, 0] - eta2 = markers[ip, 1] - eta3 = markers[ip, 2] - - # marker weight and magnetic moment - weight = markers[ip, 5] - mu = markers[ip, 9] - - # filling =mu*w_p/N - filling = mu * weight / Np * scale - - particle_to_mat_kernels.vec_fill_b_v0(args_derham, eta1, eta2, eta3, vec, filling) - - @stack_array("dfm", "df_inv", "df_inv_t", "g_inv", "tmp1", "tmp2", "b", "b_prod", "bstar", "norm_b1", "curl_norm_b") def cc_lin_mhd_5d_D( args_markers: "MarkerArguments", @@ -115,19 +75,22 @@ def cc_lin_mhd_5d_D( mat12: "float[:,:,:,:,:,:]", mat13: "float[:,:,:,:,:,:]", mat23: "float[:,:,:,:,:,:]", - epsilon: float, - ep_scale: "float", - b2_1: "float[:,:,:]", - b2_2: "float[:,:,:]", - b2_3: "float[:,:,:]", + epsilon: float, # model specific argument + b2_1: "float[:,:,:]", # model specific argument + b2_2: "float[:,:,:]", # model specific argument + b2_3: "float[:,:,:]", # model specific argument + # model specific argument norm_b11: "float[:,:,:]", norm_b12: "float[:,:,:]", norm_b13: "float[:,:,:]", + # model specific argument curl_norm_b1: "float[:,:,:]", curl_norm_b2: "float[:,:,:]", curl_norm_b3: "float[:,:,:]", basis_u: "int", -): + scale_mat: "float", + boundary_cut: float, +): # model specific argument r"""Accumulation kernel for the propagator :class:`~struphy.propagators.propagators_fields.CurrentCoupling5DDensity`. Accumulates :math:`\alpha`-form matrix with the filling functions (:math:`\alpha = 2`) @@ -194,6 +157,9 @@ def cc_lin_mhd_5d_D( v = markers[ip, 3] + if eta1 < boundary_cut or eta1 > 1.0 - boundary_cut: + continue + # b-field evaluation span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) @@ -220,9 +186,11 @@ def cc_lin_mhd_5d_D( # calculate Bstar and transform to H1vec b_star[:] = b + epsilon * v * curl_norm_b + b_star /= det_df # calculate b_para and b_star_para b_para = linalg_kernels.scalar_dot(norm_b1, b) + b_para /= det_df b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) @@ -234,22 +202,13 @@ def cc_lin_mhd_5d_D( if basis_u == 0: # filling functions - filling_m12 = -weight * density_const * b_prod[0, 1] * ep_scale / epsilon - filling_m13 = -weight * density_const * b_prod[0, 2] * ep_scale / epsilon - filling_m23 = -weight * density_const * b_prod[1, 2] * ep_scale / epsilon + filling_m12 = -weight * density_const * b_prod[0, 1] * scale_mat + filling_m13 = -weight * density_const * b_prod[0, 2] * scale_mat + filling_m23 = -weight * density_const * b_prod[1, 2] * scale_mat # call the appropriate matvec filler particle_to_mat_kernels.mat_fill_v0vec_asym( - args_derham, - span1, - span2, - span3, - mat12, - mat13, - mat23, - filling_m12, - filling_m13, - filling_m23, + args_derham, span1, span2, span3, mat12, mat13, mat23, filling_m12, filling_m13, filling_m23 ) elif basis_u == 1: @@ -260,42 +219,24 @@ def cc_lin_mhd_5d_D( linalg_kernels.matrix_matrix(g_inv, b_prod, tmp1) linalg_kernels.matrix_matrix(tmp1, g_inv, tmp2) - filling_m12 = -weight * density_const * tmp2[0, 1] * ep_scale / epsilon - filling_m13 = -weight * density_const * tmp2[0, 2] * ep_scale / epsilon - filling_m23 = -weight * density_const * tmp2[1, 2] * ep_scale / epsilon + filling_m12 = -weight * density_const * 
tmp2[0, 1] * scale_mat + filling_m13 = -weight * density_const * tmp2[0, 2] * scale_mat + filling_m23 = -weight * density_const * tmp2[1, 2] * scale_mat # call the appropriate matvec filler particle_to_mat_kernels.mat_fill_v1_asym( - args_derham, - span1, - span2, - span3, - mat12, - mat13, - mat23, - filling_m12, - filling_m13, - filling_m23, + args_derham, span1, span2, span3, mat12, mat13, mat23, filling_m12, filling_m13, filling_m23 ) elif basis_u == 2: # filling functions - filling_m12 = -weight * density_const * b_prod[0, 1] * ep_scale / epsilon / det_df**2 - filling_m13 = -weight * density_const * b_prod[0, 2] * ep_scale / epsilon / det_df**2 - filling_m23 = -weight * density_const * b_prod[1, 2] * ep_scale / epsilon / det_df**2 + filling_m12 = -weight * density_const * b_prod[0, 1] * scale_mat / det_df**2 + filling_m13 = -weight * density_const * b_prod[0, 2] * scale_mat / det_df**2 + filling_m23 = -weight * density_const * b_prod[1, 2] * scale_mat / det_df**2 # call the appropriate matvec filler particle_to_mat_kernels.mat_fill_v2_asym( - args_derham, - span1, - span2, - span3, - mat12, - mat13, - mat23, - filling_m12, - filling_m13, - filling_m23, + args_derham, span1, span2, span3, mat12, mat13, mat23, filling_m12, filling_m13, filling_m23 ) # -- removed omp: #$ omp end parallel @@ -307,23 +248,23 @@ def cc_lin_mhd_5d_D( @stack_array( "dfm", - "df_inv", "df_inv_t", + "df_inv", "g_inv", "filling_m", "filling_v", "tmp", "tmp1", + "tmp2", "tmp_m", "tmp_v", "b", - "bfull_star", "b_prod", - "b_prod_neg", + "b_prod_negb_star", "norm_b1", "curl_norm_b", ) -def cc_lin_mhd_5d_curlb( +def cc_lin_mhd_5d_J1( args_markers: "MarkerArguments", args_derham: "DerhamArguments", args_domain: "DomainArguments", @@ -336,19 +277,21 @@ def cc_lin_mhd_5d_curlb( vec1: "float[:,:,:]", vec2: "float[:,:,:]", vec3: "float[:,:,:]", - epsilon: float, - ep_scale: float, - b1: "float[:,:,:]", - b2: "float[:,:,:]", - b3: "float[:,:,:]", - norm_b11: "float[:,:,:]", - norm_b12: "float[:,:,:]", - norm_b13: "float[:,:,:]", - curl_norm_b1: "float[:,:,:]", - curl_norm_b2: "float[:,:,:]", - curl_norm_b3: "float[:,:,:]", - basis_u: "int", -): + epsilon: float, # model specific argument + b1: "float[:,:,:]", # model specific argument + b2: "float[:,:,:]", # model specific argument + b3: "float[:,:,:]", # model specific argument + norm_b11: "float[:,:,:]", # model specific argument + norm_b12: "float[:,:,:]", # model specific argument + norm_b13: "float[:,:,:]", # model specific argument + curl_norm_b1: "float[:,:,:]", # model specific argument + curl_norm_b2: "float[:,:,:]", # model specific argument + curl_norm_b3: "float[:,:,:]", # model specific argument + basis_u: "int", # model specific argument + scale_mat: "float", # model specific argument + scale_vec: "float", # model specific argument + boundary_cut: "float", +): # model specific argument r"""Accumulation kernel for the propagator :class:`~struphy.propagators.propagators_coupling.CurrentCoupling5DCurlb`. Accumulates :math:`\alpha`-form matrix and vector with the filling functions (:math:`\alpha = 2`) @@ -360,6 +303,21 @@ def cc_lin_mhd_5d_curlb( B_p^\mu &= w_p \left( \frac{v^2_{\parallel,p}}{g\hat B^*_\parallel} \mathbf B^2_{\times} \right)_\mu \,, where :math:`\mathbf B^2_{\times} \mathbf a := \hat{\mathbf B}^2 \times \mathbf a` for :math:`a \in \mathbb R^3`. + + Parameters + ---------- + b1, b2, b3 : array[float] + FE coefficients c_ijk of the magnetic field as a 2-form. 
+ + norm_b11, norm_b12, norm_b13 : array[float] + FE coefficients c_ijk of the normalized magnetic field as a 1-form. + + curl_norm_b1, curl_norm_b2, curl_norm_b3 : array[float] + FE coefficients c_ijk of the curl of normalized magnetic field as a 2-form. + + Note + ---- + The above parameter list contains only the model specific input arguments. """ markers = args_markers.markers @@ -367,7 +325,7 @@ def cc_lin_mhd_5d_curlb( # allocate for magnetic field evaluation b = empty(3, dtype=float) - bfull_star = empty(3, dtype=float) + b_star = empty(3, dtype=float) b_prod = zeros((3, 3), dtype=float) b_prod_neg = zeros((3, 3), dtype=float) norm_b1 = empty(3, dtype=float) @@ -380,11 +338,12 @@ def cc_lin_mhd_5d_curlb( g_inv = empty((3, 3), dtype=float) # allocate for filling - filling_m = zeros((3, 3), dtype=float) - filling_v = zeros(3, dtype=float) + filling_m = empty((3, 3), dtype=float) + filling_v = empty(3, dtype=float) tmp = empty((3, 3), dtype=float) tmp1 = empty((3, 3), dtype=float) + tmp2 = empty((3, 3), dtype=float) tmp_m = empty((3, 3), dtype=float) tmp_v = empty(3, dtype=float) @@ -392,6 +351,8 @@ def cc_lin_mhd_5d_curlb( # get number of markers n_markers_loc = shape(markers)[0] + # -- removed omp: #$ omp parallel firstprivate(b_prod) private(ip, boundary_cut, eta1, eta2, eta3, v, weight, span1, span2, span3, b1, b2, b3, b, b_star, b_prod_neg, norm_b1, curl_norm_b, abs_b_star_para, dfm, df_inv, df_inv_t, g_inv, det_df, tmp, tmp1, tmp2, tmp_m, tmp_v, filling_m, filling_v) + # -- removed omp: #$ omp for reduction ( + : mat11, mat12, mat13, mat22, mat23, mat33, vec1, vec2, vec3) for ip in range(n_markers_loc): # only do something if particle is a "true" particle (i.e. not a hole) if markers[ip, 0] == -1.0: @@ -406,6 +367,9 @@ def cc_lin_mhd_5d_curlb( weight = markers[ip, 5] v = markers[ip, 3] + if eta1 < boundary_cut or eta1 > 1.0 - boundary_cut: + continue + # b-field evaluation span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) @@ -423,11 +387,11 @@ def cc_lin_mhd_5d_curlb( # curl_norm_b; 2form eval_2form_spline_mpi(span1, span2, span3, args_derham, curl_norm_b1, curl_norm_b2, curl_norm_b3, curl_norm_b) - # b_star; 2form - bfull_star[:] = b + curl_norm_b * v * epsilon + # b_star; 2form in H1vec + b_star[:] = (b + curl_norm_b * v * epsilon) / det_df # calculate abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, bfull_star) + abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) # calculate tensor product of two curl_norm_b linalg_kernels.outer(curl_norm_b, curl_norm_b, tmp) @@ -447,8 +411,8 @@ def cc_lin_mhd_5d_curlb( linalg_kernels.matrix_matrix(tmp1, b_prod_neg, tmp_m) linalg_kernels.matrix_vector(b_prod, curl_norm_b, tmp_v) - filling_m[:, :] += weight * tmp_m * v**2 / abs_b_star_para**2 * ep_scale - filling_v[:] += weight * tmp_v * v**2 / abs_b_star_para * ep_scale + filling_m[:, :] = weight * tmp_m * v**2 / abs_b_star_para**2 / det_df**2 * scale_mat + filling_v[:] = weight * tmp_v * v**2 / abs_b_star_para / det_df * scale_vec # call the appropriate matvec filler particle_to_mat_kernels.m_v_fill_v0vec_symm( @@ -476,13 +440,54 @@ def cc_lin_mhd_5d_curlb( filling_v[2], ) + elif basis_u == 1: + # needed metric coefficients + linalg_kernels.matrix_inv_with_det(dfm, det_df, df_inv) + linalg_kernels.transpose(df_inv, df_inv_t) + linalg_kernels.matrix_matrix(df_inv, df_inv_t, g_inv) + linalg_kernels.matrix_matrix(g_inv, b_prod, tmp1) + linalg_kernels.matrix_vector(tmp1, curl_norm_b, tmp_v) + + linalg_kernels.matrix_matrix(tmp1, tmp, tmp2) + 
linalg_kernels.matrix_matrix(tmp2, b_prod_neg, tmp1) + linalg_kernels.matrix_matrix(tmp1, g_inv, tmp_m) + + filling_m[:, :] = weight * tmp_m * v**2 / abs_b_star_para**2 / det_df**2 * scale_mat + filling_v[:] = weight * tmp_v * v**2 / abs_b_star_para / det_df * scale_vec + + # call the appropriate matvec filler + particle_to_mat_kernels.m_v_fill_v1_symm( + args_derham, + span1, + span2, + span3, + mat11, + mat12, + mat13, + mat22, + mat23, + mat33, + filling_m[0, 0], + filling_m[0, 1], + filling_m[0, 2], + filling_m[1, 1], + filling_m[1, 2], + filling_m[2, 2], + vec1, + vec2, + vec3, + filling_v[0], + filling_v[1], + filling_v[2], + ) + elif basis_u == 2: linalg_kernels.matrix_matrix(b_prod, tmp, tmp1) linalg_kernels.matrix_matrix(tmp1, b_prod_neg, tmp_m) linalg_kernels.matrix_vector(b_prod, curl_norm_b, tmp_v) - filling_m[:, :] = weight * tmp_m * v**2 / abs_b_star_para**2 / det_df**2 * ep_scale - filling_v[:] = weight * tmp_v * v**2 / abs_b_star_para / det_df * ep_scale + filling_m[:, :] = weight * tmp_m * v**2 / abs_b_star_para**2 / det_df**4 * scale_mat + filling_v[:] = weight * tmp_v * v**2 / abs_b_star_para / det_df**2 * scale_vec # call the appropriate matvec filler particle_to_mat_kernels.m_v_fill_v2_symm( @@ -521,6 +526,8 @@ def cc_lin_mhd_5d_curlb( vec2 /= Np vec3 /= Np + # -- removed omp: #$ omp end parallel + @stack_array("dfm", "norm_b1", "filling_v") def cc_lin_mhd_5d_M( @@ -540,7 +547,8 @@ def cc_lin_mhd_5d_M( norm_b12: "float[:,:,:]", # model specific argument norm_b13: "float[:,:,:]", # model specific argument scale_vec: "float", # model specific argument -): + boundary_cut: "float", +): # model specific argument r"""Accumulation kernel for the propagator :class:`~struphy.propagators.propagators_fields.ShearAlfvenCurrentCoupling5D` and :class:`~struphy.propagators.propagators_fields.MagnetosonicCurrentCoupling5D`. 
     Accumulates 2-form vector with the filling functions:
@@ -592,6 +600,9 @@ def cc_lin_mhd_5d_M(
         weight = markers[ip, 5]
         mu = markers[ip, 9]
 
+        if eta1 < boundary_cut or eta1 > 1.0 - boundary_cut:
+            continue
+
         # b-field evaluation
         span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham)
 
@@ -606,16 +617,7 @@ def cc_lin_mhd_5d_M(
         filling_v[:] = weight * mu / det_df * scale_vec * norm_b1
 
         particle_to_mat_kernels.vec_fill_v2(
-            args_derham,
-            span1,
-            span2,
-            span3,
-            vec1,
-            vec2,
-            vec3,
-            filling_v[0],
-            filling_v[1],
-            filling_v[2],
+            args_derham, span1, span2, span3, vec1, vec2, vec3, filling_v[0], filling_v[1], filling_v[2]
         )
 
     vec1 /= Np
@@ -631,17 +633,19 @@
     "df_inv",
     "g_inv",
     "filling_v",
-    "tmp",
+    "tmp1",
+    "tmp2",
     "tmp_v",
     "b",
     "b_prod",
-    "norm_b_prod",
+    "norm_b2_prod",
     "b_star",
     "curl_norm_b",
     "norm_b1",
+    "norm_b2",
     "grad_PB",
 )
-def cc_lin_mhd_5d_gradB(
+def cc_lin_mhd_5d_J2(
     args_markers: "MarkerArguments",
     args_derham: "DerhamArguments",
     args_domain: "DomainArguments",
@@ -654,22 +658,27 @@
     vec1: "float[:,:,:]",
     vec2: "float[:,:,:]",
     vec3: "float[:,:,:]",
-    epsilon: float,
-    ep_scale: float,
-    b1: "float[:,:,:]",
-    b2: "float[:,:,:]",
-    b3: "float[:,:,:]",
-    norm_b11: "float[:,:,:]",
-    norm_b12: "float[:,:,:]",
-    norm_b13: "float[:,:,:]",
-    curl_norm_b1: "float[:,:,:]",
-    curl_norm_b2: "float[:,:,:]",
-    curl_norm_b3: "float[:,:,:]",
-    grad_PB1: "float[:,:,:]",
-    grad_PB2: "float[:,:,:]",
-    grad_PB3: "float[:,:,:]",
+    epsilon: float,  # model specific argument
+    b1: "float[:,:,:]",  # model specific argument
+    b2: "float[:,:,:]",  # model specific argument
+    b3: "float[:,:,:]",  # model specific argument
+    norm_b11: "float[:,:,:]",  # model specific argument
+    norm_b12: "float[:,:,:]",  # model specific argument
+    norm_b13: "float[:,:,:]",  # model specific argument
+    norm_b21: "float[:,:,:]",  # model specific argument
+    norm_b22: "float[:,:,:]",  # model specific argument
+    norm_b23: "float[:,:,:]",  # model specific argument
+    curl_norm_b1: "float[:,:,:]",  # model specific argument
+    curl_norm_b2: "float[:,:,:]",  # model specific argument
+    curl_norm_b3: "float[:,:,:]",  # model specific argument
+    grad_PB1: "float[:,:,:]",  # model specific argument
+    grad_PB2: "float[:,:,:]",  # model specific argument
+    grad_PB3: "float[:,:,:]",  # model specific argument
     basis_u: "int",
-):
+    scale_mat: "float",
+    scale_vec: "float",
+    boundary_cut: float,
+):  # model specific argument
     r"""Accumulation kernel for the propagator :class:`~struphy.propagators.propagators_coupling.CurrentCoupling5DGradB`.
 
     Accumulates :math:`\alpha`-form vector with the filling functions
@@ -688,6 +697,9 @@
     norm_b11, norm_b12, norm_b13 : array[float]
         FE coefficients c_ijk of the normalized magnetic field as a 1-form.
 
+    norm_b21, norm_b22, norm_b23 : array[float]
+        FE coefficients c_ijk of the normalized magnetic field as a 2-form.
+
     curl_norm_b1, curl_norm_b2, curl_norm_b3 : array[float]
         FE coefficients c_ijk of the curl of normalized magnetic field as a 2-form.
@@ -706,9 +718,10 @@ def cc_lin_mhd_5d_gradB( b = empty(3, dtype=float) b_star = empty(3, dtype=float) b_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) + norm_b2_prod = zeros((3, 3), dtype=float) curl_norm_b = empty(3, dtype=float) norm_b1 = empty(3, dtype=float) + norm_b2 = empty(3, dtype=float) grad_PB = empty(3, dtype=float) # allocate for metric coeffs @@ -719,13 +732,17 @@ def cc_lin_mhd_5d_gradB( # allocate for filling filling_v = empty(3, dtype=float) - tmp = empty((3, 3), dtype=float) + + tmp1 = empty((3, 3), dtype=float) + tmp2 = empty((3, 3), dtype=float) tmp_v = empty(3, dtype=float) # get number of markers n_markers_loc = shape(markers)[0] + # -- removed omp: #$ omp parallel firstprivate(b_prod) private(ip, boundary_cut, eta1, eta2, eta3, v, mu, weight, span1, span2, span3, b1, b2, b3, b, b_star, norm_b1, norm_b2, norm_b2_prod, curl_norm_b, grad_PB, abs_b_star_para, dfm, df_inv, df_inv_t, g_inv, det_df, tmp1, tmp2, tmp_v, filling_v) + # -- removed omp: #$ omp for reduction ( + : mat11, mat12, mat13, mat22, mat23, mat33, vec1, vec2, vec3) for ip in range(n_markers_loc): # only do something if particle is a "true" particle (i.e. not a hole) if markers[ip, 0] == -1.0: @@ -736,191 +753,9 @@ def cc_lin_mhd_5d_gradB( eta2 = markers[ip, 1] eta3 = markers[ip, 2] - # marker weight and velocity - weight = markers[ip, 5] - v = markers[ip, 3] - mu = markers[ip, 9] - - # b-field evaluation - span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) - - # evaluate Jacobian, result in dfm - evaluation_kernels.df(eta1, eta2, eta3, args_domain, dfm) - - det_df = linalg_kernels.det(dfm) - - # needed metric coefficients - linalg_kernels.matrix_inv_with_det(dfm, det_df, df_inv) - linalg_kernels.transpose(df_inv, df_inv_t) - linalg_kernels.matrix_matrix(df_inv, df_inv_t, g_inv) - - # b; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, b1, b2, b3, b) - - # norm_b1; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, norm_b11, norm_b12, norm_b13, norm_b1) - - # curl_norm_b; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, curl_norm_b1, curl_norm_b2, curl_norm_b3, curl_norm_b) - - # grad_PB; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, grad_PB1, grad_PB2, grad_PB3, grad_PB) - - # b_star; 2form transformed into H1vec - b_star[:] = b + curl_norm_b * v * epsilon - - # calculate abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) - - # operator bx() as matrix - b_prod[0, 1] = -b[2] - b_prod[0, 2] = +b[1] - b_prod[1, 0] = +b[2] - b_prod[1, 2] = -b[0] - b_prod[2, 0] = -b[1] - b_prod[2, 1] = +b[0] - - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] - - if basis_u == 0: - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] = weight * tmp_v * mu / abs_b_star_para * ep_scale - - # call the appropriate matvec filler - particle_to_mat_kernels.vec_fill_v0vec( - args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], - ) - - elif basis_u == 2: - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] = weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - # call the appropriate matvec filler - particle_to_mat_kernels.vec_fill_v2( - 
args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], - ) - vec1 /= Np - vec2 /= Np - vec3 /= Np - - -@stack_array( - "dfm", - "df_inv_t", - "df_inv", - "g_inv", - "filling_v", - "tmp", - "tmp_v", - "b", - "b_prod", - "beq", - "beq_prod", - "norm_b_prod", - "bfull_star", - "curl_norm_b", - "norm_b1", - "grad_PB", - "grad_PBeq", -) -def cc_lin_mhd_5d_gradB_dg_init( - args_markers: "MarkerArguments", - args_derham: "DerhamArguments", - args_domain: "DomainArguments", - vec1: "float[:,:,:]", - vec2: "float[:,:,:]", - vec3: "float[:,:,:]", - epsilon: float, - ep_scale: float, - b1: "float[:,:,:]", - b2: "float[:,:,:]", - b3: "float[:,:,:]", - beq1: "float[:,:,:]", - beq2: "float[:,:,:]", - beq3: "float[:,:,:]", - norm_b11: "float[:,:,:]", - norm_b12: "float[:,:,:]", - norm_b13: "float[:,:,:]", - curl_norm_b1: "float[:,:,:]", - curl_norm_b2: "float[:,:,:]", - curl_norm_b3: "float[:,:,:]", - grad_PB1: "float[:,:,:]", - grad_PB2: "float[:,:,:]", - grad_PB3: "float[:,:,:]", - grad_PBeq1: "float[:,:,:]", - grad_PBeq2: "float[:,:,:]", - grad_PBeq3: "float[:,:,:]", - basis_u: "int", -): - r"""TODO""" - - markers = args_markers.markers - Np = args_markers.Np - - # allocate for magnetic field evaluation - b = empty(3, dtype=float) - beq = empty(3, dtype=float) - bfull_star = empty(3, dtype=float) - b_prod = zeros((3, 3), dtype=float) - beq_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) - curl_norm_b = empty(3, dtype=float) - norm_b1 = empty(3, dtype=float) - grad_PB = empty(3, dtype=float) - grad_PBeq = empty(3, dtype=float) - - # allocate for metric coeffs - dfm = empty((3, 3), dtype=float) - df_inv = empty((3, 3), dtype=float) - df_inv_t = empty((3, 3), dtype=float) - g_inv = empty((3, 3), dtype=float) - - # allocate for filling - filling_v = empty(3, dtype=float) - tmp = empty((3, 3), dtype=float) - - tmp_v = empty(3, dtype=float) - - # get number of markers - n_markers_loc = shape(markers)[0] - - for ip in range(n_markers_loc): - # only do something if particle is a "true" particle (i.e. 
not a hole) - if markers[ip, 0] == -1.0: + if eta1 < boundary_cut or eta1 > 1.0 - boundary_cut: continue - # marker positions - eta1 = markers[ip, 0] - eta2 = markers[ip, 1] - eta3 = markers[ip, 2] - # marker weight and velocity weight = markers[ip, 5] v = markers[ip, 3] @@ -942,26 +777,23 @@ def cc_lin_mhd_5d_gradB_dg_init( # b; 2form eval_2form_spline_mpi(span1, span2, span3, args_derham, b1, b2, b3, b) - # beq; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, beq1, beq2, beq3, beq) - # norm_b1; 1form eval_1form_spline_mpi(span1, span2, span3, args_derham, norm_b11, norm_b12, norm_b13, norm_b1) + # norm_b2; 2form + eval_2form_spline_mpi(span1, span2, span3, args_derham, norm_b21, norm_b22, norm_b23, norm_b2) + # curl_norm_b; 2form eval_2form_spline_mpi(span1, span2, span3, args_derham, curl_norm_b1, curl_norm_b2, curl_norm_b3, curl_norm_b) # grad_PB; 1form eval_1form_spline_mpi(span1, span2, span3, args_derham, grad_PB1, grad_PB2, grad_PB3, grad_PB) - # grad_PBeq; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, grad_PBeq1, grad_PBeq2, grad_PBeq3, grad_PBeq) - # b_star; 2form transformed into H1vec - bfull_star[:] = b + beq + curl_norm_b * v * epsilon + b_star[:] = (b + curl_norm_b * v * epsilon) / det_df # calculate abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, bfull_star) + abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) # operator bx() as matrix b_prod[0, 1] = -b[2] @@ -971,346 +803,58 @@ def cc_lin_mhd_5d_gradB_dg_init( b_prod[2, 0] = -b[1] b_prod[2, 1] = +b[0] - beq_prod[0, 1] = -beq[2] - beq_prod[0, 2] = +beq[1] - beq_prod[1, 0] = +beq[2] - beq_prod[1, 2] = -beq[0] - beq_prod[2, 0] = -beq[1] - beq_prod[2, 1] = +beq[0] - - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] + norm_b2_prod[0, 1] = -norm_b2[2] + norm_b2_prod[0, 2] = +norm_b2[1] + norm_b2_prod[1, 0] = +norm_b2[2] + norm_b2_prod[1, 2] = -norm_b2[0] + norm_b2_prod[2, 0] = -norm_b2[1] + norm_b2_prod[2, 1] = +norm_b2[0] if basis_u == 0: - # beq contribution - linalg_kernels.matrix_matrix(beq_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) + linalg_kernels.matrix_matrix(b_prod, g_inv, tmp1) + linalg_kernels.matrix_matrix(tmp1, norm_b2_prod, tmp2) + linalg_kernels.matrix_matrix(tmp2, g_inv, tmp1) - filling_v[:] = weight * tmp_v * mu / abs_b_star_para * ep_scale + linalg_kernels.matrix_vector(tmp1, grad_PB, tmp_v) - # b contribution - linalg_kernels.matrix_matrix(beq_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale - - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale - - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale + filling_v[:] = weight * tmp_v * mu / abs_b_star_para * scale_vec # call the appropriate matvec filler particle_to_mat_kernels.vec_fill_v0vec( - args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], - ) - - elif basis_u == 2: - # beq contribution - linalg_kernels.matrix_matrix(beq_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - - filling_v[:] = weight * tmp_v * mu / abs_b_star_para / 
det_df * ep_scale - - # b contribution - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - # call the appropriate matvec filler - particle_to_mat_kernels.vec_fill_v2( - args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], + args_derham, span1, span2, span3, vec1, vec2, vec3, filling_v[0], filling_v[1], filling_v[2] ) - vec1 /= Np - vec2 /= Np - vec3 /= Np - - -@stack_array( - "dfm", - "df_inv_t", - "df_inv", - "g_inv", - "filling_v", - "tmp", - "tmp_v", - "b", - "b_prod", - "eta_diff", - "beq", - "beq_prod", - "norm_b_prod", - "bfull_star", - "curl_norm_b", - "norm_b1", - "grad_PB", - "grad_PBeq", - "eta_mid", - "eta_diff", -) -def cc_lin_mhd_5d_gradB_dg( - args_markers: "MarkerArguments", - args_derham: "DerhamArguments", - args_domain: "DomainArguments", - vec1: "float[:,:,:]", - vec2: "float[:,:,:]", - vec3: "float[:,:,:]", - epsilon: float, - ep_scale: float, - b1: "float[:,:,:]", - b2: "float[:,:,:]", - b3: "float[:,:,:]", - beq1: "float[:,:,:]", - beq2: "float[:,:,:]", - beq3: "float[:,:,:]", - norm_b11: "float[:,:,:]", - norm_b12: "float[:,:,:]", - norm_b13: "float[:,:,:]", - curl_norm_b1: "float[:,:,:]", - curl_norm_b2: "float[:,:,:]", - curl_norm_b3: "float[:,:,:]", - grad_PB1: "float[:,:,:]", - grad_PB2: "float[:,:,:]", - grad_PB3: "float[:,:,:]", - grad_PBeq1: "float[:,:,:]", - grad_PBeq2: "float[:,:,:]", - grad_PBeq3: "float[:,:,:]", - basis_u: "int", - const: "float", -): - r"""TODO""" - - markers = args_markers.markers - Np = args_markers.Np - - # allocate for magnetic field evaluation - eta_diff = empty(3, dtype=float) - eta_mid = empty(3, dtype=float) - b = empty(3, dtype=float) - beq = empty(3, dtype=float) - bfull_star = empty(3, dtype=float) - b_prod = zeros((3, 3), dtype=float) - beq_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) - curl_norm_b = empty(3, dtype=float) - norm_b1 = empty(3, dtype=float) - grad_PB = empty(3, dtype=float) - grad_PBeq = empty(3, dtype=float) - - # allocate for metric coeffs - dfm = empty((3, 3), dtype=float) - df_inv = empty((3, 3), dtype=float) - df_inv_t = empty((3, 3), dtype=float) - g_inv = empty((3, 3), dtype=float) - - # allocate for filling - filling_v = empty(3, dtype=float) - tmp = empty((3, 3), dtype=float) - - tmp_v = empty(3, dtype=float) - - # get number of markers - n_markers_loc = shape(markers)[0] - - for ip in range(n_markers_loc): - # only do something if particle is a "true" particle (i.e. 
not a hole) - if markers[ip, 0] == -1.0: - continue - - # marker positions, mid point - eta_mid[:] = (markers[ip, 0:3] + markers[ip, 11:14]) / 2.0 - eta_mid[:] = mod(eta_mid[:], 1.0) - - eta_diff[:] = markers[ip, 0:3] - markers[ip, 11:14] - - # marker weight and velocity - weight = markers[ip, 5] - v = markers[ip, 3] - mu = markers[ip, 9] - - # b-field evaluation - span1, span2, span3 = get_spans(eta_mid[0], eta_mid[1], eta_mid[2], args_derham) - - # evaluate Jacobian, result in dfm - evaluation_kernels.df(eta_mid[0], eta_mid[1], eta_mid[2], args_domain, dfm) - - det_df = linalg_kernels.det(dfm) - - # needed metric coefficients - linalg_kernels.matrix_inv_with_det(dfm, det_df, df_inv) - linalg_kernels.transpose(df_inv, df_inv_t) - linalg_kernels.matrix_matrix(df_inv, df_inv_t, g_inv) - - # b; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, b1, b2, b3, b) - - # beq; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, beq1, beq2, beq3, beq) - - # norm_b1; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, norm_b11, norm_b12, norm_b13, norm_b1) - - # curl_norm_b; 2form - eval_2form_spline_mpi(span1, span2, span3, args_derham, curl_norm_b1, curl_norm_b2, curl_norm_b3, curl_norm_b) - - # grad_PB; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, grad_PB1, grad_PB2, grad_PB3, grad_PB) - - # grad_PBeq; 1form - eval_1form_spline_mpi(span1, span2, span3, args_derham, grad_PBeq1, grad_PBeq2, grad_PBeq3, grad_PBeq) - - # b_star; 2form transformed into H1vec - bfull_star[:] = b + beq + curl_norm_b * v * epsilon - - # calculate abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, bfull_star) - - # operator bx() as matrix - b_prod[0, 1] = -b[2] - b_prod[0, 2] = +b[1] - b_prod[1, 0] = +b[2] - b_prod[1, 2] = -b[0] - b_prod[2, 0] = -b[1] - b_prod[2, 1] = +b[0] - - beq_prod[0, 1] = -beq[2] - beq_prod[0, 2] = +beq[1] - beq_prod[1, 0] = +beq[2] - beq_prod[1, 2] = -beq[0] - beq_prod[2, 0] = -beq[1] - beq_prod[2, 1] = +beq[0] - - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] - - if basis_u == 0: - # beq * gradPBeq contribution - linalg_kernels.matrix_matrix(beq_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - - filling_v[:] = weight * tmp_v * mu / abs_b_star_para * ep_scale - - # beq * gradPB contribution - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale - - # beq * dg term contribution - linalg_kernels.matrix_vector(tmp, eta_diff, tmp_v) - filling_v[:] += tmp_v / abs_b_star_para * const - - # b * gradPBeq contribution - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale + elif basis_u == 1: + linalg_kernels.matrix_matrix(g_inv, b_prod, tmp1) + linalg_kernels.matrix_matrix(tmp1, g_inv, tmp2) + linalg_kernels.matrix_matrix(tmp2, norm_b2_prod, tmp1) + linalg_kernels.matrix_matrix(tmp1, g_inv, tmp2) - # b * gradPB contribution - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - filling_v[:] += weight * tmp_v * mu / abs_b_star_para * ep_scale + linalg_kernels.matrix_vector(tmp2, grad_PB, tmp_v) - # b * dg term contribution - linalg_kernels.matrix_vector(tmp, eta_diff, tmp_v) - filling_v[:] += tmp_v / abs_b_star_para * const + filling_v[:] = weight * tmp_v * mu / 
abs_b_star_para * scale_vec # call the appropriate matvec filler - particle_to_mat_kernels.vec_fill_v0vec( - args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], + particle_to_mat_kernels.vec_fill_v1( + args_derham, span1, span2, span3, vec1, vec2, vec3, filling_v[0], filling_v[1], filling_v[2] ) elif basis_u == 2: - # beq * gradPBeq contribution - linalg_kernels.matrix_matrix(beq_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) - - filling_v[:] = weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - # beq * gradPB contribution - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - # beq * dg term contribution - linalg_kernels.matrix_vector(tmp, eta_diff, tmp_v) - - filling_v[:] += tmp_v / abs_b_star_para / det_df * const - - # b * gradPBeq contribtuion - linalg_kernels.matrix_matrix(b_prod, norm_b_prod, tmp) - linalg_kernels.matrix_vector(tmp, grad_PBeq, tmp_v) + linalg_kernels.matrix_matrix(b_prod, g_inv, tmp1) + linalg_kernels.matrix_matrix(tmp1, norm_b2_prod, tmp2) + linalg_kernels.matrix_matrix(tmp2, g_inv, tmp1) - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale + linalg_kernels.matrix_vector(tmp1, grad_PB, tmp_v) - # b * gradPB contribution - linalg_kernels.matrix_vector(tmp, grad_PB, tmp_v) - - filling_v[:] += weight * tmp_v * mu / abs_b_star_para / det_df * ep_scale - - # b * dg term contribution - linalg_kernels.matrix_vector(tmp, eta_diff, tmp_v) - - filling_v[:] += tmp_v / abs_b_star_para / det_df * const + filling_v[:] = weight * tmp_v * mu / abs_b_star_para / det_df * scale_vec # call the appropriate matvec filler particle_to_mat_kernels.vec_fill_v2( - args_derham, - span1, - span2, - span3, - vec1, - vec2, - vec3, - filling_v[0], - filling_v[1], - filling_v[2], + args_derham, span1, span2, span3, vec1, vec2, vec3, filling_v[0], filling_v[1], filling_v[2] ) vec1 /= Np vec2 /= Np vec3 /= Np + + # -- removed omp: #$ omp end parallel diff --git a/src/struphy/pic/accumulation/filter_kernels.py b/src/struphy/pic/accumulation/filter_kernels.py index a6c498ca8..e24c7ad5d 100644 --- a/src/struphy/pic/accumulation/filter_kernels.py +++ b/src/struphy/pic/accumulation/filter_kernels.py @@ -5,10 +5,8 @@ @stack_array("vec_copy", "mask1d", "mask", "top", "i_bottom", "i_top", "fi", "ir") -def apply_three_point_filter_3d( +def apply_three_point_filter( vec: "float[:,:,:]", - dir: "int", - form: "int", Nel: "int[:]", spl_kind: "bool[:]", pn: "int[:]", @@ -49,7 +47,6 @@ def apply_three_point_filter_3d( i_top = zeros(3, dtype=int) fi = empty(3, dtype=int) ir = empty(3, dtype=int) - isDspline = zeros(3, dtype=int) # copy vectors vec_copy[:, :, :] = vec[:, :, :] @@ -65,33 +62,22 @@ def apply_three_point_filter_3d( mask[i, j, k] *= mask1d[i] * mask1d[j] * mask1d[k] # consider left and right boundary - if form == 1: - isDspline[dir] = 1 - elif form == 2: - isDspline[:] = 1 - isDspline[dir] = 0 - elif form == 3: - isDspline[:] = 1 - for i in range(3): if spl_kind[i]: top[i] = Nel[i] - 1 else: - if isDspline[i] == 1: - top[i] = Nel[i] + pn[i] - 2 - else: - top[i] = Nel[i] + pn[i] - 1 + top[i] = Nel[i] + pn[i] - 1 for i in range(3): if starts[i] == 0: if spl_kind[i]: - i_bottom[i] = 0 + i_bottom[i] = -1 else: i_bottom[i] = +1 if ends[i] == top[i]: if spl_kind[i]: - i_top[i] = 0 + i_top[i] = +1 else: i_top[i] = -1 diff --git a/src/struphy/pic/accumulation/particle_to_mat_kernels.py 
b/src/struphy/pic/accumulation/particle_to_mat_kernels.py index bc9364f6a..576d4571c 100644 --- a/src/struphy/pic/accumulation/particle_to_mat_kernels.py +++ b/src/struphy/pic/accumulation/particle_to_mat_kernels.py @@ -5834,12 +5834,7 @@ def m_v_fill_v2_full( def mat_fill_b_v0( - args_derham: "DerhamArguments", - eta1: float, - eta2: float, - eta3: float, - mat: "float[:,:,:,:,:,:]", - fill: float, + args_derham: "DerhamArguments", eta1: float, eta2: float, eta3: float, mat: "float[:,:,:,:,:,:]", fill: float ): """ Adds the contribution of one particle to the elements of an accumulation matrix V0 -> V0. The result is returned in mat. @@ -5974,12 +5969,7 @@ def m_v_fill_b_v0( def mat_fill_b_v3( - args_derham: "DerhamArguments", - eta1: float, - eta2: float, - eta3: float, - mat: "float[:,:,:,:,:,:]", - fill: float, + args_derham: "DerhamArguments", eta1: float, eta2: float, eta3: float, mat: "float[:,:,:,:,:,:]", fill: float ): """ Adds the contribution of one particle to the elements of an accumulation matrix V3 -> V3. The result is returned in mat. @@ -6122,12 +6112,7 @@ def m_v_fill_b_v3( def mat_fill_v0( - args_derham: "DerhamArguments", - span1: int, - span2: int, - span3: int, - mat: "float[:,:,:,:,:,:]", - fill: float, + args_derham: "DerhamArguments", span1: int, span2: int, span3: int, mat: "float[:,:,:,:,:,:]", fill: float ): """ Adds the contribution of one particle to the elements of an accumulation matrix V0 -> V0. The result is returned in mat. @@ -6254,12 +6239,7 @@ def m_v_fill_v0( def mat_fill_v3( - args_derham: "DerhamArguments", - span1: int, - span2: int, - span3: int, - mat: "float[:,:,:,:,:,:]", - fill: float, + args_derham: "DerhamArguments", span1: int, span2: int, span3: int, mat: "float[:,:,:,:,:,:]", fill: float ): """ Adds the contribution of one particle to the elements of an accumulation block matrix V3 -> V3. The result is returned in mat. @@ -12969,12 +12949,7 @@ def vec_fill_v0vec( def vec_fill_b_v0( - args_derham: "DerhamArguments", - eta1: float, - eta2: float, - eta3: float, - vec: "float[:,:,:]", - fill: float, + args_derham: "DerhamArguments", eta1: float, eta2: float, eta3: float, vec: "float[:,:,:]", fill: float ): """TODO""" @@ -13152,12 +13127,7 @@ def vec_fill_b_v2( def vec_fill_b_v3( - args_derham: "DerhamArguments", - eta1: float, - eta2: float, - eta3: float, - vec: "float[:,:,:]", - fill: float, + args_derham: "DerhamArguments", eta1: float, eta2: float, eta3: float, vec: "float[:,:,:]", fill: float ): """TODO""" diff --git a/src/struphy/pic/accumulation/particles_to_grid.py b/src/struphy/pic/accumulation/particles_to_grid.py index 06d67a6df..02ded30e8 100644 --- a/src/struphy/pic/accumulation/particles_to_grid.py +++ b/src/struphy/pic/accumulation/particles_to_grid.py @@ -1,18 +1,18 @@ "Base classes for particle deposition (accumulation) on the grid." 
-import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilMatrix, StencilVector import struphy.pic.accumulation.accum_kernels as accums import struphy.pic.accumulation.accum_kernels_gc as accums_gc +import struphy.pic.accumulation.filter_kernels as filters from struphy.feec.mass import WeightedMassOperators from struphy.feec.psydac_derham import Derham from struphy.kernel_arguments.pusher_args_kernels import DerhamArguments, DomainArguments -from struphy.pic.accumulation.filter import AccumFilter, FilterParameters from struphy.pic.base import Particles from struphy.profiling.profiling import ProfileManager +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -85,7 +85,12 @@ def __init__( *, add_vector: bool = False, symmetry: str = None, - filter_params: FilterParameters = None, + filter_params: dict = { + "use_filter": None, + "modes": None, + "repeat": None, + "alpha": None, + }, ): self._particles = particles self._space_id = space_id @@ -96,6 +101,8 @@ def __init__( self._symmetry = symmetry + self._filter_params = filter_params + self._form = self.derham.space_to_form[space_id] # initialize matrices (instances of WeightedMassOperator) @@ -172,9 +179,6 @@ def __init__( for bl in vec.blocks: self._args_data += (bl._data,) - # initialize filter - self._accfilter = AccumFilter(filter_params, self._derham, self._space_id) - def __call__(self, *optional_args, **args_control): """ Performs the accumulation into the matrix/vector by calling the chosen accumulation kernel and additional analytical contributions (control variate, optional). @@ -191,7 +195,7 @@ def __call__(self, *optional_args, **args_control): Entries must be pyccel-conform types. args_control : any - Keyword arguments for an analytical control variate correction in the accumulation step. Possible keywords are 'control_vec' for a vector correction or 'control_mat' for a matrix correction. Values are a 1d (vector) or 2d (matrix) list with callables or xp.ndarrays used for the correction. + Keyword arguments for an analytical control variate correction in the accumulation step. Possible keywords are 'control_vec' for a vector correction or 'control_mat' for a matrix correction. Values are a 1d (vector) or 2d (matrix) list with callables or np.ndarrays used for the correction. 
""" # flags for break @@ -213,13 +217,52 @@ def __call__(self, *optional_args, **args_control): ) # apply filter - if self.accfilter.params.use_filter is not None: + if self.filter_params["use_filter"] is not None: for vec in self._vectors: vec.exchange_assembly_data() vec.update_ghost_regions() - self.accfilter(vec) - vec_finished = True + if self.filter_params["use_filter"] == "fourier_in_tor": + self.apply_toroidal_fourier_filter(vec, self.filter_params["modes"]) + + elif self.filter_params["use_filter"] == "three_point": + for _ in range(self.filter_params["repeat"]): + for i in range(3): + filters.apply_three_point_filter( + vec[i]._data, + np.array(self.derham.Nel), + np.array(self.derham.spl_kind), + np.array(self.derham.p), + np.array(self.derham.Vh[self.form][i].starts), + np.array(self.derham.Vh[self.form][i].ends), + alpha=self.filter_params["alpha"], + ) + + vec.update_ghost_regions() + + elif self.filter_params["use_filter"] == "hybrid": + self.apply_toroidal_fourier_filter(vec, self.filter_params["modes"]) + + for _ in range(self.filter_params["repeat"]): + for i in range(2): + filters.apply_three_point_filter( + vec[i]._data, + np.array(self.derham.Nel), + np.array(self.derham.spl_kind), + np.array(self.derham.p), + np.array(self.derham.Vh[self.form][i].starts), + np.array(self.derham.Vh[self.form][i].ends), + alpha=self.filter_params["alpha"], + ) + + vec.update_ghost_regions() + + else: + raise NotImplemented( + "The type of filter must be fourier or three_point.", + ) + + vec_finished = True if self.particles.clone_config is None: num_clones = 1 @@ -353,9 +396,14 @@ def vectors(self): return out @property - def accfilter(self): - """Callable filters""" - return self._accfilter + def filter_params(self): + """Dict of three components for the accumulation filter parameters: use_filter(string), repeat(int) and alpha(float).""" + return self._filter_params + + @property + def filter_params(self): + """Dict of three components for the accumulation filter parameters: use_filter(string), repeat(int) and alpha(float).""" + return self._filter_params def init_control_variate(self, mass_ops): """Set up the use of noise reduction by control variate.""" @@ -365,6 +413,55 @@ def init_control_variate(self, mass_ops): # L2 projector for dofs self._get_L2dofs = L2Projector(self.space_id, mass_ops).get_dofs + def apply_toroidal_fourier_filter(self, vec, modes): + """ + Applying fourier filter to the spline coefficients of the accumulated vector (toroidal direction). + + Parameters + ---------- + vec : BlockVector + + modes : list + Mode numbers which are not filtered out. 
+ """ + + from scipy.fft import irfft, rfft + + tor_Nel = self.derham.Nel[2] + + # Nel along the toroidal direction must be equal or bigger than 2*maximum mode + assert tor_Nel >= 2 * max(modes) + + pn = self.derham.p + ir = np.empty(3, dtype=int) + + if (tor_Nel % 2) == 0: + vec_temp = np.zeros(int(tor_Nel / 2) + 1, dtype=complex) + else: + vec_temp = np.zeros(int((tor_Nel - 1) / 2) + 1, dtype=complex) + + # no domain decomposition along the toroidal direction + assert self.derham.domain_decomposition.nprocs[2] == 1 + + for axis in range(3): + starts = self.derham.Vh[ſelf.form][axis].starts + ends = self.derham.Vh[self.form][axis].ends + + # index range + for i in range(3): + ir[i] = ends[i] + 1 - starts[i] + + # filtering + for i in range(ir[0]): + for j in range(ir[1]): + vec_temp[:] = 0 + vec_temp[modes] = rfft( + vec[axis]._data[pn[0] + i, pn[1] + j, pn[2] : pn[2] + ir[2]], + )[modes] + vec[axis]._data[pn[0] + i, pn[1] + j, pn[2] : pn[2] + ir[2]] = irfft(vec_temp, n=tor_Nel) + + vec.update_ghost_regions() + def show_accumulated_spline_field(self, mass_ops: WeightedMassOperators, eta_direction=0, component=0): r"""1D plot of the spline field corresponding to the accumulated vector. The latter can be viewed as the rhs of an L2-projection: @@ -388,7 +485,7 @@ def show_accumulated_spline_field(self, mass_ops: WeightedMassOperators, eta_dir field.vector = a # plot field - eta = xp.linspace(0, 1, 100) + eta = np.linspace(0, 1, 100) if eta_direction == 0: args = (eta, 0.5, 0.5) elif eta_direction == 1: @@ -437,7 +534,6 @@ def __init__( kernel: Pyccelkernel, mass_ops: WeightedMassOperators, args_domain: DomainArguments, - filter_params: FilterParameters = None, ): self._particles = particles self._space_id = space_id @@ -491,9 +587,6 @@ def __init__( for bl in vec.blocks: self._args_data += (bl._data,) - # initialize filter - self._accfilter = AccumFilter(filter_params, self._derham, self._space_id) - def __call__(self, *optional_args, **args_control): """ Performs the accumulation into the vector by calling the chosen accumulation kernel @@ -510,7 +603,7 @@ def __call__(self, *optional_args, **args_control): args_control : any Keyword arguments for an analytical control variate correction in the accumulation step. Possible keywords are 'control_vec' for a vector correction or 'control_mat' for a matrix correction. - Values are a 1d (vector) or 2d (matrix) list with callables or xp.ndarrays used for the correction. + Values are a 1d (vector) or 2d (matrix) list with callables or np.ndarrays used for the correction. 
""" # flags for break @@ -530,15 +623,6 @@ def __call__(self, *optional_args, **args_control): *optional_args, ) - # apply filter - if self.accfilter.params.use_filter is not None: - for vec in self._vectors: - vec.exchange_assembly_data() - vec.update_ghost_regions() - - self.accfilter(vec) - vec_finished = True - if self.particles.clone_config is None: num_clones = 1 else: @@ -608,11 +692,6 @@ def vectors(self): return out - @property - def accfilter(self): - """Callable filters""" - return self._accfilter - def init_control_variate(self, mass_ops): """Set up the use of noise reduction by control variate.""" @@ -644,7 +723,7 @@ def show_accumulated_spline_field(self, mass_ops, eta_direction=0): field.vector = a # plot field - eta = xp.linspace(0, 1, 100) + eta = np.linspace(0, 1, 100) if eta_direction == 0: args = (eta, 0.5, 0.5) elif eta_direction == 1: diff --git a/src/struphy/pic/base.py b/src/struphy/pic/base.py index 84900418d..d8e8cf407 100644 --- a/src/struphy/pic/base.py +++ b/src/struphy/pic/base.py @@ -14,8 +14,6 @@ class Intracomm: x = None -import cunumpy as xp -from line_profiler import profile from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI from sympy.ntheory import factorint @@ -27,11 +25,10 @@ class Intracomm: from struphy.fields_background.projected_equils import ProjectedFluidEquilibrium from struphy.geometry.base import Domain from struphy.geometry.utilities import TransformedPformComponent -from struphy.initial.base import Perturbation -from struphy.io.options import OptsLoading +from struphy.initial import perturbations from struphy.io.output_handling import DataContainer from struphy.kernel_arguments.pusher_args_kernels import MarkerArguments -from struphy.kinetic_background.base import KineticBackground, Maxwellian +from struphy.kinetic_background import maxwellians from struphy.pic import sampling_kernels, sobol_seq from struphy.pic.pushing.pusher_utilities_kernels import reflect from struphy.pic.sorting_kernels import ( @@ -48,12 +45,8 @@ class Intracomm: naive_evaluation_flat, naive_evaluation_meshgrid, ) -from struphy.pic.utilities import ( - BoundaryParameters, - LoadingParameters, - WeightsParameters, -) from struphy.utils import utils +from struphy.utils.arrays import xp as np from struphy.utils.clone_config import CloneConfig from struphy.utils.pyccel import Pyccelkernel @@ -86,6 +79,12 @@ class Particles(metaclass=ABCMeta): clone_config : CloneConfig Manages the configuration for clone-based (copied grids) parallel processing using MPI. + Np : int + Number of particles. + + ppc : int + Particles per cell. Cells are defined from ``domain_array``. + domain_decomp : tuple The first entry is a domain_array (see :attr:`~struphy.feec.psydac_derham.Derham.domain_array`) and the second entry is the number of MPI processes in each direction. @@ -94,26 +93,43 @@ class Particles(metaclass=ABCMeta): True if the dimension is to be used in the domain decomposition (=default for each dimension). If mpi_dims_mask[i]=False, the i-th dimension will not be decomposed. + ppb : int + Particles per sorting box. Boxes are defined from ``boxes_per_dim``. + boxes_per_dim : tuple Number of boxes in each logical direction (n_eta1, n_eta2, n_eta3). box_bufsize : float Between 0 and 1, relative buffer size for box array (default = 0.25). + bc : list + Either 'remove', 'reflect', 'periodic' or 'refill' in each direction. + + bc_refill : list + Either 'inner' or 'outer'. + + bc_sph : list + Boundary condition for sph density evaluation. 
+        Either 'periodic', 'mirror' or 'fixed' in each direction.
+
    type : str
        Either 'full_f' (default), 'delta_f' or 'sph'.

+    control_variate : bool
+        Whether to use a control variate for noise reduction (only if type is 'full_f' or 'sph').
+
    name : str
        Name of particle species.

-    loading_params : LoadingParameters
-        Parameterts for particle loading.
+    loading : str
+        Drawing of markers; either 'pseudo_random', 'sobol_standard',
+        'sobol_antithetic', 'external', 'restart' or 'tesselation'.

-    weights_params : WeightsParameters
-        Parameters for particle weights.
+    loading_params : dict
+        Parameters for loading, see defaults below.

-    boundary_params : BoundaryParameters
-        Parameters for particle boundary conditions.
+    weights_params : dict
+        Parameters for initializing weights, see defaults below.

    bufsize : float
        Size of buffer (as multiple of total size, default=.25) in markers array.
@@ -127,16 +143,10 @@
    projected_equil : ProjectedFluidEquilibrium
        Struphy fluid equilibrium projected into a discrete Derham complex.

-    background : KineticBackground
-        Kinetic background.
-
-    initial_condition : KineticBackground
-        Kinetic initial condition.
+    bckgr_params : dict
+        Kinetic background parameters.

-    n_as_volume_form: bool
-        Whether the number density n is given as a volume form or scalar function (=default).
-
-    perturbations : Perturbation | list
+    pert_params : dict
        Kinetic perturbation parameters.

    equation_params : dict
@@ -150,23 +160,28 @@ def __init__(
        self,
        comm_world: Intracomm = None,
        clone_config: CloneConfig = None,
+        Np: int = None,
+        ppc: int = None,
        domain_decomp: tuple = None,
        mpi_dims_mask: tuple | list = None,
+        ppb: int = 10,
        boxes_per_dim: tuple | list = None,
        box_bufsize: float = 5.0,
+        bc: list = None,
+        bc_refill: str = None,
+        bc_sph: str = None,
        type: str = "full_f",
+        control_variate: bool = False,
        name: str = "some_name",
-        loading_params: LoadingParameters = None,
-        weights_params: WeightsParameters = None,
-        boundary_params: BoundaryParameters = None,
+        loading: str = "pseudo_random",
+        loading_params: dict = None,
+        weights_params: dict = None,
        bufsize: float = 0.25,
        domain: Domain = None,
        equil: FluidEquilibrium = None,
        projected_equil: ProjectedFluidEquilibrium = None,
-        background: KineticBackground | FluidEquilibrium = None,
-        initial_condition: KineticBackground = None,
-        perturbations: dict[str, Perturbation] = None,
-        n_as_volume_form: bool = False,
+        bckgr_params: dict = None,
+        pert_params: dict = None,
        equation_params: dict = None,
        verbose: bool = False,
    ):
@@ -180,21 +195,8 @@ def __init__(
            self._num_clones = self.clone_config.num_clones
            self._clone_id = self.clone_config.clone_id

-        # defaults
-        if loading_params is None:
-            loading_params = LoadingParameters()
-
-        if weights_params is None:
-            weights_params = WeightsParameters()
-
-        if boundary_params is None:
-            boundary_params = BoundaryParameters()
-
        # other parameters
        self._name = name
-        self._loading_params = loading_params
-        self._weights_params = weights_params
-        self._boundary_params = boundary_params
        self._domain = domain
        self._equil = equil
        self._projected_equil = projected_equil
@@ -221,29 +223,26 @@ def __init__(
            self._nprocs = domain_decomp[1]

        # total number of cells (equal to mpi_size if no grid)
-        n_cells = xp.sum(xp.prod(self.domain_array[:, 2::3], axis=1, dtype=int)) * self.num_clones
-        # if verbose:
-        #     print(f"\n{self.mpi_rank = }, {n_cells = }")
+        n_cells = np.sum(np.prod(self.domain_array[:, 2::3], axis=1, dtype=int)) * self.num_clones
+        if verbose:
+
print(f"{self.mpi_rank = }, {n_cells = }") # total number of boxes if self.boxes_per_dim is None: n_boxes = self.mpi_size * self.num_clones else: assert all([nboxes >= nproc for nboxes, nproc in zip(self.boxes_per_dim, self.nprocs)]), ( - f"There must be at least one box {self.boxes_per_dim =} on each process {self.nprocs =} in each direction." + f"There must be at least one box {self.boxes_per_dim = } on each process {self.nprocs = } in each direction." ) assert all([nboxes % nproc == 0 for nboxes, nproc in zip(self.boxes_per_dim, self.nprocs)]), ( - f"Number of boxes {self.boxes_per_dim =} must be divisible by number of processes {self.nprocs =} in each direction." + f"Number of boxes {self.boxes_per_dim = } must be divisible by number of processes {self.nprocs = } in each direction." ) - n_boxes = xp.prod(self.boxes_per_dim, dtype=int) * self.num_clones + n_boxes = np.prod(self.boxes_per_dim, dtype=int) * self.num_clones - # if verbose: - # print(f"\n{self.mpi_rank = }, {n_boxes = }") + if verbose: + print(f"{self.mpi_rank = }, {n_boxes = }") # total number of markers (Np) and particles per cell (ppc) - Np = self.loading_params.Np - ppc = self.loading_params.ppc - ppb = self.loading_params.ppb if Np is not None: self._Np = int(Np) self._ppc = self.Np / n_cells @@ -264,8 +263,6 @@ def __init__( self._allocate_marker_array() # boundary conditions - bc = boundary_params.bc - bc_refill = boundary_params.bc_refill if bc is None: bc = ["periodic", "periodic", "periodic"] @@ -284,67 +281,87 @@ def __init__( self._remove_axes = [axis for axis, b_c in enumerate(bc) if b_c == "remove"] self._bc_refill = bc_refill - bc_sph = boundary_params.bc_sph if bc_sph is None: bc_sph = [bci if bci == "periodic" else "mirror" for bci in self.bc] for bci in bc_sph: assert bci in ("periodic", "mirror", "fixed") + self._bc_sph = bc_sph # particle type assert type in ("full_f", "delta_f", "sph") self._type = type + self._control_variate = control_variate # initialize sorting boxes self._verbose = verbose self._initialize_sorting_boxes() # particle loading parameters - self._loading = loading_params.loading - self._spatial = loading_params.spatial + assert loading in ( + "pseudo_random", + "sobol_standard", + "sobol_antithetic", + "external", + "restart", + "tesselation", + ) + self._loading = loading + + loading_params_default = { + "seed": None, + "dir_particles": None, + "moments": None, + "spatial": "uniform", + "initial": None, + "n_quad": 1, + } - # weights - self._reject_weights = weights_params.reject_weights - self._threshold = weights_params.threshold - self._control_variate = weights_params.control_variate + self._loading_params = set_defaults( + loading_params, + loading_params_default, + ) + self._spatial = self.loading_params["spatial"] + + # weights parameters + weights_params_default = { + "reject_weights": False, + "threshold": 0.0, + } + + self._weights_params = set_defaults( + weights_params, + weights_params_default, + ) # background - if background is None: - raise ValueError("A background function must be passed to Particles.") - else: - self._background = background + if bckgr_params is None: + bckgr_params = {"Maxwellian3D": {}, "pforms": [None, None]} - # background p-form description in [eta, v] (False means 0-form, True means volume form -> divide by det) - if isinstance(background, FluidEquilibrium): - self._is_volume_form = (n_as_volume_form, False) + # background p-form description in [eta, v] (None means 0-form, "vol" means volume form -> divide by det) + if isinstance(bckgr_params, 
FluidEquilibrium): + self._bckgr_params = bckgr_params + self._pforms = [None, None] else: - self._is_volume_form = ( - n_as_volume_form, - self.background.volume_form, - ) + self._bckgr_params = copy.deepcopy(bckgr_params) + self._pforms = self.bckgr_params.pop("pforms", [None, None]) # set background function self._set_background_function() self._set_background_coordinates() - # perturbation parameters (needed for fluid background) - self._perturbations = perturbations - - # initial condition - if initial_condition is None: - self._initial_condition = self.background - else: - self._initial_condition = initial_condition + # perturbation parameters + self._pert_params = pert_params # for loading - # if self.loading_params["moments"] is None and self.type != "sph" and isinstance(self.bckgr_params, dict): - self._generate_sampling_moments() + if self.loading_params["moments"] is None and self.type != "sph" and isinstance(self.bckgr_params, dict): + self._auto_sampling_params() # create buffers for mpi_sort_markers - self._sorting_etas = xp.zeros(self.markers.shape, dtype=float) - self._is_on_proc_domain = xp.zeros((self.markers.shape[0], 3), dtype=bool) - self._can_stay = xp.zeros(self.markers.shape[0], dtype=bool) + self._sorting_etas = np.zeros(self.markers.shape, dtype=float) + self._is_on_proc_domain = np.zeros((self.markers.shape[0], 3), dtype=bool) + self._can_stay = np.zeros(self.markers.shape[0], dtype=bool) self._reqs = [None] * self.mpi_size self._recvbufs = [None] * self.mpi_size self._send_to_i = [None] * self.mpi_size @@ -352,8 +369,8 @@ def __init__( @classmethod @abstractmethod - def default_background(cls): - """The default background (of type Maxwellian).""" + def default_bckgr_params(cls): + """Dictionary holding the minimal information of the default background.""" pass @abstractmethod @@ -450,7 +467,7 @@ def type(self): return self._type @property - def loading(self) -> OptsLoading: + def loading(self): """Type of particle loading.""" return self._loading @@ -526,38 +543,25 @@ def clone_id(self): return self._clone_id @property - def background(self) -> KineticBackground: - """Kinetic background.""" - return self._background + def bckgr_params(self): + """Kinetic background parameters.""" + return self._bckgr_params @property - def perturbations(self) -> dict[str, Perturbation]: - """Kinetic perturbations, keys are the names of moments of the distribution function ("n", "u1", etc.).""" - return self._perturbations + def pert_params(self): + """Kinetic perturbation parameters.""" + return self._pert_params @property - def loading_params(self) -> LoadingParameters: + def loading_params(self): + """Parameters for marker loading.""" return self._loading_params @property - def weights_params(self) -> WeightsParameters: + def weights_params(self): + """Parameters for initializing weights.""" return self._weights_params - @property - def boundary_params(self) -> BoundaryParameters: - """Parameters for marker loading.""" - return self._boundary_params - - @property - def reject_weights(self): - """Whether to reect weights below threshold.""" - return self._reject_weights - - @property - def threshold(self): - """Threshold for rejecting weights.""" - return self._threshold - @property def boxes_per_dim(self): """Tuple, number of sorting boxes per dimension.""" @@ -573,11 +577,6 @@ def equation_params(self): """Parameters appearing in model equation due to Struphy normalization.""" return self._equation_params - @property - def initial_condition(self) -> KineticBackground: - 
"""Kinetic initial condition""" - return self._initial_condition - @property def f_init(self): """Callable initial condition (background + perturbation). @@ -598,7 +597,7 @@ def u_init(self): return self._u_init @property - def f0(self) -> Maxwellian: + def f0(self): assert hasattr(self, "_f0"), AttributeError( "No background distribution available, please run self._set_background_function()", ) @@ -726,16 +725,16 @@ def index(self): def valid_mks(self): """Array of booleans stating if an entry in the markers array is a true local particle (not a hole or ghost).""" if not hasattr(self, "_valid_mks"): - self._valid_mks = ~xp.logical_or(self.holes, self.ghost_particles) + self._valid_mks = ~np.logical_or(self.holes, self.ghost_particles) return self._valid_mks def update_valid_mks(self): - self._valid_mks[:] = ~xp.logical_or(self.holes, self.ghost_particles) + self._valid_mks[:] = ~np.logical_or(self.holes, self.ghost_particles) @property def n_mks_loc(self): """Number of valid markers on process (without holes and ghosts).""" - return xp.count_nonzero(self.valid_mks) + return np.count_nonzero(self.valid_mks) @property def n_mks_on_each_proc(self): @@ -745,7 +744,7 @@ def n_mks_on_each_proc(self): @property def n_mks_on_clone(self): """Number of valid markers on current clone (without holes and ghosts).""" - return xp.sum(self.n_mks_on_each_proc) + return np.sum(self.n_mks_on_each_proc) @property def n_mks_on_each_clone(self): @@ -755,7 +754,7 @@ def n_mks_on_each_clone(self): @property def n_mks_global(self): """Number of valid markers on current clone (without holes and ghosts).""" - return xp.sum(self.n_mks_on_each_clone) + return np.sum(self.n_mks_on_each_clone) @property def positions(self): @@ -764,7 +763,7 @@ def positions(self): @positions.setter def positions(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc, 3) self._markers[self.valid_mks, self.index["pos"]] = new @@ -775,8 +774,8 @@ def velocities(self): @velocities.setter def velocities(self, new): - assert isinstance(new, xp.ndarray) - assert new.shape == (self.n_mks_loc, self.vdim), f"{self.n_mks_loc =} and {self.vdim =} but {new.shape =}" + assert isinstance(new, np.ndarray) + assert new.shape == (self.n_mks_loc, self.vdim), f"{self.n_mks_loc = } and {self.vdim = } but {new.shape = }" self._markers[self.valid_mks, self.index["vel"]] = new @property @@ -786,7 +785,7 @@ def phasespace_coords(self): @phasespace_coords.setter def phasespace_coords(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc, 3 + self.vdim) self._markers[self.valid_mks, self.index["coords"]] = new @@ -797,7 +796,7 @@ def weights(self): @weights.setter def weights(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc,) self._markers[self.valid_mks, self.index["weights"]] = new @@ -808,7 +807,7 @@ def sampling_density(self): @sampling_density.setter def sampling_density(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc,) self._markers[self.valid_mks, self.index["s0"]] = new @@ -819,7 +818,7 @@ def weights0(self): @weights0.setter def weights0(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc,) self._markers[self.valid_mks, self.index["w0"]] = new @@ -830,14 +829,16 @@ def marker_ids(self): @marker_ids.setter 
def marker_ids(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) assert new.shape == (self.n_mks_loc,) self._markers[self.valid_mks, self.index["ids"]] = new @property - def is_volume_form(self): - """Tuple of size 2 for (position, velocity), defining the p-form representation of f_init: True means volume-form, False means 0-form.""" - return self._is_volume_form + def pforms(self): + """Tuple of size 2; each entry must be either "vol" or None, defining the p-form + (space and velocity, respectively) of f_init. + """ + return self._pforms @property def spatial(self): @@ -861,7 +862,7 @@ def f_coords(self): @f_coords.setter def f_coords(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) self.markers[self.valid_mks, self.f_coords_index] = new @property @@ -873,16 +874,16 @@ def args_markers(self): def f_jacobian_coords(self): """Coordinates of the velocity jacobian determinant of the distribution fuction.""" if isinstance(self.f_jacobian_coords_index, list): - return self.markers[xp.ix_(~self.holes, self.f_jacobian_coords_index)] + return self.markers[np.ix_(~self.holes, self.f_jacobian_coords_index)] else: return self.markers[~self.holes, self.f_jacobian_coords_index] @f_jacobian_coords.setter def f_jacobian_coords(self, new): - assert isinstance(new, xp.ndarray) + assert isinstance(new, np.ndarray) if isinstance(self.f_jacobian_coords_index, list): self.markers[ - xp.ix_( + np.ix_( ~self.holes, self.f_jacobian_coords_index, ) @@ -929,7 +930,7 @@ def _get_domain_decomp(self, mpi_dims_mask: tuple | list = None): Returns ------- - dom_arr : xp.ndarray + dom_arr : np.ndarray A 2d array of shape (#MPI processes, 9). The row index denotes the process rank. The columns are for n=0,1,2: - arr[i, 3*n + 0] holds the LEFT domain boundary of process i in direction eta_(n+1). - arr[i, 3*n + 1] holds the RIGHT domain boundary of process i in direction eta_(n+1). 
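The `dom_arr` layout above is easiest to see on a toy row. Below is a minimal sketch (not part of the patch), assuming column 3*n + 2 holds the number of cells of process i in direction eta_(n+1), which is consistent with the `domain_array[:, 2::3]` cell counts used in `__init__`; `describe_domain_row` is an illustrative helper, not a struphy function:

```python
import numpy as np

def describe_domain_row(dom_arr: np.ndarray, rank: int) -> None:
    """Pretty-print one row of the (mpi_size, 9) domain array.

    Assumes the column layout documented above, with column 3*n + 2
    holding the number of cells in direction eta_(n+1).
    """
    row = dom_arr[rank]
    for n in range(3):
        left, right, n_cells = row[3 * n : 3 * n + 3]
        print(f"eta{n + 1}: [{left:.3f}, {right:.3f}] with {int(n_cells)} cells")

# two processes splitting eta_1 in half, each with 8x4x2 cells
dom_arr = np.array(
    [
        [0.0, 0.5, 8.0, 0.0, 1.0, 4.0, 0.0, 1.0, 2.0],
        [0.5, 1.0, 8.0, 0.0, 1.0, 4.0, 0.0, 1.0, 2.0],
    ]
)
describe_domain_row(dom_arr, rank=1)
```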
@@ -941,7 +942,7 @@ def _get_domain_decomp(self, mpi_dims_mask: tuple | list = None): if mpi_dims_mask is None: mpi_dims_mask = [True, True, True] - dom_arr = xp.zeros((self.mpi_size, 9), dtype=float) + dom_arr = np.zeros((self.mpi_size, 9), dtype=float) # factorize mpi size factors = factorint(self.mpi_size) @@ -965,10 +966,10 @@ def _get_domain_decomp(self, mpi_dims_mask: tuple | list = None): mm = (mm + 1) % 3 nprocs[mm] *= fac - assert xp.prod(nprocs) == self.mpi_size + assert np.prod(nprocs) == self.mpi_size # domain decomposition - breaks = [xp.linspace(0.0, 1.0, nproc + 1) for nproc in nprocs] + breaks = [np.linspace(0.0, 1.0, nproc + 1) for nproc in nprocs] # fill domain array for n in range(self.mpi_size): @@ -991,35 +992,38 @@ def _get_domain_decomp(self, mpi_dims_mask: tuple | list = None): return dom_arr, tuple(nprocs) def _set_background_function(self): - self._f0 = self.background - - # if isinstance(self.background, FluidEquilibrium): - # self._f0 = self.background - # else: - # self._f0 = copy.deepcopy(self.background) - # self.f0.add_perturbation = False - - # self._f0 = None - # if isinstance(self.bckgr_params, FluidEquilibrium): - # self._f0 = self.bckgr_params - # else: - # for bckgr in self.backgrounds: - # # SPH case: f0 is set to a FluidEquilibrium - # if self.type == "sph": - # _eq = getattr(equils, fi_type)(**maxw_params) - # if not isinstance(_eq, NumericalFluidEquilibrium): - # _eq.domain = self.domain - # if self._f0 is None: - # self._f0 = _eq - # else: - # raise NotImplementedError("Summation of fluid backgrounds not yet implemented.") - # # self._f0 = self._f0 + (lambda e1, e2, e3: _eq.n0(e1, e2, e3)) - # # default case - # else: - # if self._f0 is None: - # self._f0 = bckgr - # else: - # self._f0 = self._f0 + bckgr + self._f0 = None + if isinstance(self.bckgr_params, FluidEquilibrium): + self._f0 = self.bckgr_params + else: + for fi, maxw_params in self.bckgr_params.items(): + if fi[-2] == "_": + fi_type = fi[:-2] + else: + fi_type = fi + + # SPH case: f0 is set to a FluidEquilibrium + if self.type == "sph": + _eq = getattr(equils, fi_type)(**maxw_params) + if not isinstance(_eq, NumericalFluidEquilibrium): + _eq.domain = self.domain + if self._f0 is None: + self._f0 = _eq + else: + raise NotImplementedError("Summation of fluid backgrounds not yet implemented.") + # self._f0 = self._f0 + (lambda e1, e2, e3: _eq.n0(e1, e2, e3)) + # default case + else: + if self._f0 is None: + self._f0 = getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + equil=self.equil, + ) + else: + self._f0 = self._f0 + getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + equil=self.equil, + ) def _set_background_coordinates(self): if self.type != "sph" and self.f0.coords == "constants_of_motion": @@ -1057,14 +1061,14 @@ def _n_mks_load_and_Np_per_clone(self): """Return two arrays: 1) an array of sub_comm.size where the i-th entry corresponds to the number of markers drawn on process i, and 2) an array of size num_clones where the i-th entry corresponds to the number of markers on clone i.""" # number of cells on current process - n_cells_loc = xp.prod( + n_cells_loc = np.prod( self.domain_array[self.mpi_rank, 2::3], dtype=int, ) # array of number of markers on each process at loading stage if self.clone_config is not None: - _n_cells_clone = xp.sum(xp.prod(self.domain_array[:, 2::3], axis=1, dtype=int)) + _n_cells_clone = np.sum(np.prod(self.domain_array[:, 2::3], axis=1, dtype=int)) _n_mks_load_tot = self.clone_config.get_Np_clone(self.Np) _ppc = _n_mks_load_tot / 
_n_cells_clone else: @@ -1074,14 +1078,14 @@ def _n_mks_load_and_Np_per_clone(self): n_mks_load = self._gather_scalar_in_subcomm_array(int(_ppc * n_cells_loc)) # add deviation from Np to rank 0 - n_mks_load[0] += _n_mks_load_tot - xp.sum(n_mks_load) + n_mks_load[0] += _n_mks_load_tot - np.sum(n_mks_load) # check if all markers are there - assert xp.sum(n_mks_load) == _n_mks_load_tot + assert np.sum(n_mks_load) == _n_mks_load_tot # Np on each clone Np_per_clone = self._gather_scalar_in_intercomm_array(_n_mks_load_tot) - assert xp.sum(Np_per_clone) == self.Np + assert np.sum(Np_per_clone) == self.Np return n_mks_load, Np_per_clone @@ -1092,23 +1096,23 @@ def _allocate_marker_array(self): # number of markers on the local process at loading stage n_mks_load_loc = self.n_mks_load[self._mpi_rank] - bufsize = self.bufsize + 1.0 / xp.sqrt(n_mks_load_loc) + bufsize = self.bufsize + 1.0 / np.sqrt(n_mks_load_loc) # allocate markers array (3 x positions, vdim x velocities, weight, s0, w0, ..., ID) with buffer self._n_rows = round(n_mks_load_loc * (1 + bufsize)) - self._markers = xp.zeros((self.n_rows, self.n_cols), dtype=float) + self._markers = np.zeros((self.n_rows, self.n_cols), dtype=float) # allocate auxiliary arrays - self._holes = xp.zeros(self.n_rows, dtype=bool) - self._ghost_particles = xp.zeros(self.n_rows, dtype=bool) - self._valid_mks = xp.zeros(self.n_rows, dtype=bool) - self._is_outside_right = xp.zeros(self.n_rows, dtype=bool) - self._is_outside_left = xp.zeros(self.n_rows, dtype=bool) - self._is_outside = xp.zeros(self.n_rows, dtype=bool) + self._holes = np.zeros(self.n_rows, dtype=bool) + self._ghost_particles = np.zeros(self.n_rows, dtype=bool) + self._valid_mks = np.zeros(self.n_rows, dtype=bool) + self._is_outside_right = np.zeros(self.n_rows, dtype=bool) + self._is_outside_left = np.zeros(self.n_rows, dtype=bool) + self._is_outside = np.zeros(self.n_rows, dtype=bool) # create array container (3 x positions, vdim x velocities, weight, s0, w0, ID) for removed markers self._n_lost_markers = 0 - self._lost_markers = xp.zeros((int(self.n_rows * 0.5), 10), dtype=float) + self._lost_markers = np.zeros((int(self.n_rows * 0.5), 10), dtype=float) # arguments for kernels self._args_markers = MarkerArguments( @@ -1126,7 +1130,7 @@ def _allocate_marker_array(self): # Have at least 3 spare places in markers array assert self.args_markers.first_free_idx + 2 < self.n_cols - 1, ( - f"{self.args_markers.first_free_idx + 2} is not smaller than {self.n_cols - 1 =}; not enough columns in marker array !!" + f"{self.args_markers.first_free_idx + 2} is not smaller than {self.n_cols - 1 = }; not enough columns in marker array !!" 
) def _initialize_sorting_boxes(self): @@ -1167,7 +1171,7 @@ def _initialize_sorting_boxes(self): bc_sph=self.bc_sph, is_domain_boundary=is_domain_boundary, comm=self.mpi_comm, - verbose=False, + verbose=self.verbose, box_bufsize=self._box_bufsize, ) @@ -1179,144 +1183,102 @@ def _initialize_sorting_boxes(self): else: self._sorting_boxes = None - def _generate_sampling_moments(self): - """Automatically determine moments for sampling distribution (Gaussian) from the given background.""" - - if self.loading_params.moments is None: - self.loading_params.moments = tuple([0.0] * self.vdim + [1.0] * self.vdim) - - # TODO: reformulate this function with KineticBackground methods + def _auto_sampling_params(self): + """Automatically determine sampling parameters from the background given""" + ns = [] + us = [] + vths = [] - # ns = [] - # us = [] - # vths = [] - - # for fi, params in self.bckgr_params.items(): - # if fi[-2] == "_": - # fi_type = fi[:-2] - # else: - # fi_type = fi + for fi, params in self.bckgr_params.items(): + if fi[-2] == "_": + fi_type = fi[:-2] + else: + fi_type = fi - # us.append([]) - # vths.append([]) + us.append([]) + vths.append([]) - # bckgr = getattr(maxwellians, fi_type) + bckgr = getattr(maxwellians, fi_type) + default_maxw_params = bckgr.default_maxw_params() - # for key in default_maxw_params: - # if key[0] == "n": - # if key in params: - # ns += [params[key]] - # else: - # ns += [1.0] + for key in default_maxw_params: + if key[0] == "n": + if key in params: + ns += [params[key]] + else: + ns += [1.0] - # elif key[0] == "u": - # if key in params: - # us[-1] += [params[key]] - # else: - # us[-1] += [0.0] + elif key[0] == "u": + if key in params: + us[-1] += [params[key]] + else: + us[-1] += [0.0] - # elif key[0] == "v": - # if key in params: - # vths[-1] += [params[key]] - # else: - # vths[-1] += [1.0] + elif key[0] == "v": + if key in params: + vths[-1] += [params[key]] + else: + vths[-1] += [1.0] - # assert len(ns) == len(us) == len(vths) + assert len(ns) == len(us) == len(vths) - # ns = xp.array(ns) - # us = xp.array(us) - # vths = xp.array(vths) + ns = np.array(ns) + us = np.array(us) + vths = np.array(vths) # Use the mean of shifts and thermal velocity such that outermost shift+thermal is # new shift + new thermal - # mean_us = xp.mean(us, axis=0) - # us_ext = us + vths * xp.where(us >= 0, 1, -1) - # us_ext_dist = us_ext - mean_us[None, :] - # new_vths = xp.max(xp.abs(us_ext_dist), axis=0) - - # new_moments = [] - - # new_moments += [*mean_us] - # new_moments += [*new_vths] - # new_moments = [float(moment) for moment in new_moments] - - # self.loading_params["moments"] = new_moments - - def _set_initial_condition(self): - if self.type != "sph": - self._f_init = self.initial_condition - else: - # Get the initialization function and pass the correct arguments - assert isinstance(self.f0, FluidEquilibrium) - self._u_init = self.f0.u_cart - - if self.perturbations is not None: - for ( - moment, - pert, - ) in self.perturbations.items(): # only one perturbation is taken into account at the moment - assert isinstance(moment, str) - if pert is None: - continue - assert isinstance(pert, Perturbation) - - if moment == "n": - _fun = TransformedPformComponent( - pert, - pert.given_in_basis, - "0", - comp=pert.comp, - domain=self.domain, - ) - elif moment == "u1": - _fun = TransformedPformComponent( - pert, - pert.given_in_basis, - "v", - comp=pert.comp, - domain=self.domain, - ) - _fun_cart = lambda e1, e2, e3: self.domain.push(_fun, e1, e2, e3, kind="v") - self._u_init = 
lambda e1, e2, e3: self.f0.u_cart(e1, e2, e3)[0] + _fun_cart(e1, e2, e3) - # TODO: add other velocity components - else: - _fun = None + mean_us = np.mean(us, axis=0) + us_ext = us + vths * np.where(us >= 0, 1, -1) + us_ext_dist = us_ext - mean_us[None, :] + new_vths = np.max(np.abs(us_ext_dist), axis=0) - def _f_init(*etas, flat_eval=False): - if len(etas) == 1: - if _fun is None: - out = self.f0.n0(etas[0]) - else: - out = self.f0.n0(etas[0]) + _fun(*etas[0].T) - else: - assert len(etas) == 3 - E1, E2, E3, is_sparse_meshgrid = Domain.prepare_eval_pts( - etas[0], - etas[1], - etas[2], - flat_eval=flat_eval, - ) + new_moments = [] - out0 = self.f0.n0(E1, E2, E3) + new_moments += [*mean_us] + new_moments += [*new_vths] + new_moments = [float(moment) for moment in new_moments] - if _fun is None: - out = out0 - else: - out1 = _fun(E1, E2, E3) - assert out0.shape == out1.shape - out = out0 + out1 + self.loading_params["moments"] = new_moments - if flat_eval: - out = xp.squeeze(out) + def _set_initial_condition(self, bp_copy=None, pp_copy=None): + """Compute callable initial condition from background + perturbation.""" - return out + if bp_copy is None: + bp_copy = copy.deepcopy(self.bckgr_params) + if pp_copy is None: + pp_copy = copy.deepcopy(self.pert_params) - self._f_init = _f_init + # Get the initialization function and pass the correct arguments + self._f_init = None + for fi, maxw_params in bp_copy.items(): + if fi[-2] == "_": + fi_type = fi[:-2] + else: + fi_type = fi + + pert_params = pp_copy + if pp_copy is not None: + if fi in pp_copy: + pert_params = pp_copy[fi] + + if self._f_init is None: + self._f_init = getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + pert_params=pert_params, + equil=self.equil, + ) + else: + self._f_init = self._f_init + getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + pert_params=pert_params, + equil=self.equil, + ) def _load_external( self, n_mks_load_loc: int, - n_mks_load_cum_sum: xp.ndarray, + n_mks_load_cum_sum: np.ndarray, ): """Load markers from external .hdf5 file. @@ -1325,12 +1287,12 @@ def _load_external( n_mks_load_loc: int Number of markers on the local process at loading stage. - n_mks_load_cum_sum: xp.ndarray + n_mks_load_cum_sum: np.ndarray Cumulative sum of number of markers on each process at loading stage. """ if self.mpi_rank == 0: file = h5py.File( - self.loading_params.dir_external, + self.loading_params["dir_external"], "r", ) print(f"\nLoading markers from file: {file}") @@ -1349,7 +1311,7 @@ def _load_external( file.close() else: - recvbuf = xp.zeros( + recvbuf = np.zeros( (n_mks_load_loc, self.markers.shape[1]), dtype=float, ) @@ -1363,16 +1325,16 @@ def _load_restart(self): o_path = state["o_path"] - if self.loading_params.dir_particles_abs is None: + if self.loading_params["dir_particles_abs"] is None: data_path = os.path.join( o_path, - self.loading_params.dir_particles, + self.loading_params["dir_particles"], ) else: - data_path = self.loading_params.dir_particles_abs + data_path = self.loading_params["dir_particles_abs"] data = DataContainer(data_path, comm=self.mpi_comm) - self._markers[:, :] = data.file["restart/" + self.loading_params.restart_key][-1, :, :] + self._markers[:, :] = data.file["restart/" + self.loading_params["key"]][-1, :, :] def _load_tesselation(self, n_quad: int = 1): """ @@ -1491,8 +1453,8 @@ def draw_markers( self.update_ghost_particles() # cumulative sum of number of markers on each process at loading stage. 
-        n_mks_load_cum_sum = xp.cumsum(self.n_mks_load)
-        Np_per_clone_cum_sum = xp.cumsum(self.Np_per_clone)
+        n_mks_load_cum_sum = np.cumsum(self.n_mks_load)
+        Np_per_clone_cum_sum = np.cumsum(self.Np_per_clone)
        _first_marker_id = (Np_per_clone_cum_sum - self.Np_per_clone)[self.clone_id] + (
            n_mks_load_cum_sum - self.n_mks_load
        )[self._mpi_rank]
@@ -1520,21 +1482,21 @@
                self._load_tesselation()
                if self.type == "sph":
                    self._set_initial_condition()
-                    self.velocities = xp.array(self.u_init(self.positions)[0]).T
+                    self.velocities = np.array(self.u_init(self.positions)[0]).T

                # set markers ID in last column
-                self.marker_ids = _first_marker_id + xp.arange(n_mks_load_loc, dtype=float)
+                self.marker_ids = _first_marker_id + np.arange(n_mks_load_loc, dtype=float)
        else:
            if self.mpi_rank == 0 and verbose:
                print("\nLoading fresh markers:")
-                for key, val in self.loading_params.__dict__.items():
+                for key, val in self.loading_params.items():
                    print((key + " :").ljust(25), val)

            # 1. standard random number generator (pseudo-random)
            if self.loading == "pseudo_random":
                # set seed
-                _seed = self.loading_params.seed
+                _seed = self.loading_params["seed"]
                if _seed is not None:
-                    xp.random.seed(_seed)
+                    np.random.seed(_seed)

                # counting integers
                num_loaded_particles_loc = 0  # number of particles already loaded (local)
@@ -1545,15 +1507,15 @@
                while num_loaded_particles_glob < int(self.Np):
                    # Generate a chunk of random particles
                    num_to_add_glob = min(chunk_size, int(self.Np) - num_loaded_particles_glob)
-                    temp = xp.random.rand(num_to_add_glob, 3 + self.vdim)
+                    temp = np.random.rand(num_to_add_glob, 3 + self.vdim)

                    # check which particles are on the current process domain
-                    is_on_proc_domain = xp.logical_and(
+                    is_on_proc_domain = np.logical_and(
                        temp[:, :3] > self.domain_array[self.mpi_rank, 0::3],
                        temp[:, :3] < self.domain_array[self.mpi_rank, 1::3],
                    )
-                    valid_idx = xp.nonzero(xp.all(is_on_proc_domain, axis=1))[0]
+                    valid_idx = np.nonzero(np.all(is_on_proc_domain, axis=1))[0]
                    valid_particles = temp[valid_idx]
-                    valid_particles = xp.array_split(valid_particles, self.num_clones)[self.clone_id]
+                    valid_particles = np.array_split(valid_particles, self.num_clones)[self.clone_id]
                    num_valid = valid_particles.shape[0]

                    # Add the valid particles to the phasespace_coords array
@@ -1565,12 +1527,12 @@
                    num_loaded_particles_loc += num_valid

                # make sure all particles are loaded
-                assert self.Np == int(num_loaded_particles_glob), f"{self.Np =}, {int(num_loaded_particles_glob) =}"
+                assert self.Np == int(num_loaded_particles_glob), f"{self.Np = }, {int(num_loaded_particles_glob) = }"

                # set new n_mks_load
                self._gather_scalar_in_subcomm_array(num_loaded_particles_loc, out=self.n_mks_load)
                n_mks_load_loc = self.n_mks_load[self.mpi_rank]
-                n_mks_load_cum_sum = xp.cumsum(self.n_mks_load)
+                n_mks_load_cum_sum = np.cumsum(self.n_mks_load)

                # set new holes in markers array to -1
                self._markers[num_loaded_particles_loc:] = -1.0
@@ -1610,11 +1572,11 @@
            # initial velocities - SPH case: v(0) = u(x(0)) for given velocity u(x)
            if self.type == "sph":
                self._set_initial_condition()
-                self.velocities = xp.array(self.u_init(self.positions)[0]).T
+                self.velocities = np.array(self.u_init(self.positions)[0]).T

            else:
                # inverse transform sampling in velocity space
-                u_mean = xp.array(self.loading_params.moments[: self.vdim])
-                v_th = xp.array(self.loading_params.moments[self.vdim :])
+                u_mean = np.array(self.loading_params["moments"][: self.vdim])
+                v_th = np.array(self.loading_params["moments"][self.vdim :])

                # Particles6D: (1d Maxwellian, 1d Maxwellian, 1d Maxwellian)
                if self.vdim == 3:
@@ -1622,7 +1584,7 @@
                        sp.erfinv(
                            2 * self.velocities - 1,
                        )
-                        * xp.sqrt(2)
+                        * np.sqrt(2)
                        * v_th
                        + u_mean
                    )
@@ -1632,16 +1594,16 @@
                        sp.erfinv(
                            2 * self.velocities[:, 0] - 1,
                        )
-                        * xp.sqrt(2)
+                        * np.sqrt(2)
                        * v_th[0]
                        + u_mean[0]
                    )

                    self._markers[:n_mks_load_loc, 4] = (
-                        xp.sqrt(
-                            -1 * xp.log(1 - self.velocities[:, 1]),
+                        np.sqrt(
+                            -1 * np.log(1 - self.velocities[:, 1]),
                        )
-                        * xp.sqrt(2)
+                        * np.sqrt(2)
                        * v_th[1]
                        + u_mean[1]
                    )
@@ -1654,17 +1616,17 @@
            # inversion method for drawing uniformly on the disc
            if self.spatial == "disc":
-                self._markers[:n_mks_load_loc, 0] = xp.sqrt(
+                self._markers[:n_mks_load_loc, 0] = np.sqrt(
                    self._markers[:n_mks_load_loc, 0],
                )
            else:
                assert self.spatial == "uniform", f'Spatial drawing must be "uniform" or "disc", is {self.spatial}.'

-            self.marker_ids = _first_marker_id + xp.arange(n_mks_load_loc, dtype=float)
+            self.marker_ids = _first_marker_id + np.arange(n_mks_load_loc, dtype=float)

            # set specific initial condition for some particles
-            if self.loading_params.specific_markers is not None:
-                specific_markers = self.loading_params.specific_markers
+            if self.loading_params["initial"] is not None:
+                specific_markers = self.loading_params["initial"]

                counter = 0
                for i in range(len(specific_markers)):
@@ -1681,8 +1643,8 @@
        # check if all particle positions are inside the unit cube [0, 1]^3
        n_mks_load_loc = self.n_mks_load[self._mpi_rank]
-        assert xp.all(~self.holes[:n_mks_load_loc])
-        assert xp.all(self.holes[n_mks_load_loc:])
+        assert np.all(~self.holes[:n_mks_load_loc])
+        assert np.all(self.holes[n_mks_load_loc:])

        if self._initialized_sorting and sort:
            if self.mpi_rank == 0 and verbose:
@@ -1691,7 +1653,6 @@
            self.mpi_sort_markers()
            self.do_sort()

-    @profile
    def mpi_sort_markers(
        self,
        apply_bc: bool = True,
@@ -1755,8 +1716,8 @@
        # check if all markers are on the right process after sorting
        if do_test:
-            all_on_right_proc = xp.all(
-                xp.logical_and(
+            all_on_right_proc = np.all(
+                np.logical_and(
                    self.positions > self.domain_array[self.mpi_rank, 0::3],
                    self.positions < self.domain_array[self.mpi_rank, 1::3],
                ),
@@ -1772,8 +1733,8 @@
    def initialize_weights(
        self,
        *,
        bckgr_params: dict = None,
        pert_params: dict = None,
-        # reject_weights: bool = False,
-        # threshold: float = 1e-8,
+        reject_weights: bool = False,
+        threshold: float = 1e-8,
    ):
        r"""
        Computes the initial weights
@@ -1794,14 +1755,20 @@
        pert_params : dict
            Kinetic perturbation parameters for initial condition.
+
+        reject_weights : bool
+            Whether to use ``threshold`` for rejecting weights.
+
+        threshold : float
+            Minimal value of a weight; markers with a smaller weight are set to holes.
        """

        if self.loading == "tesselation":
-            if not self.is_volume_form[0]:
+            if self.pforms[0] is None:
                fvol = TransformedPformComponent([self.f_init], "0", "3", domain=self.domain)
            else:
                fvol = self.f_init
-            cell_avg = self.tesselation.cell_averages(fvol, n_quad=self.loading_params.n_quad)
+            cell_avg = self.tesselation.cell_averages(fvol, n_quad=self.loading_params["n_quad"])
            self.weights0 = cell_avg.flatten()
        else:
            assert self.domain is not None, "A domain is needed to initialize weights."
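A minimal sketch of the weight logic in `initialize_weights` above and `update_weights` in the next hunk: w0_k = f_init(z_k)/s_0(z_k), optional threshold rejection, and the control-variate update w_k = w0_k - f0(z_k)/s_0(z_k). The function name and the NaN stand-in for holes are illustrative only; the real marker array flags holes by setting the row to -1:

```python
import numpy as np

def initial_weights(f_init_vals, s0_vals, reject_weights=False, threshold=1e-8):
    """w0_k = f_init(z_k) / s0(z_k); optionally reject tiny weights.

    NaN marks a rejected marker here; struphy instead sets the
    corresponding marker row to -1 (a 'hole').
    """
    w0 = f_init_vals / s0_vals
    if reject_weights:
        w0 = np.where(w0 < threshold, np.nan, w0)
    return w0

# control-variate update (cf. update_weights): w_k = w0_k - f0(z_k)/s0(z_k)
rng = np.random.default_rng(0)
s0 = np.full(5, 2.0)               # sampling density at the markers
f_init = rng.uniform(0.0, 1.0, 5)  # initial distribution at the markers
f0 = 0.5 * f_init                  # background part of f_init
w0 = initial_weights(f_init, s0)
w = w0 - f0 / s0
print(w0, w)
```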
@@ -1823,10 +1790,10 @@ def initialize_weights( f_init = self.f_init(*self.f_coords.T) # if f_init is vol-form, transform to 0-form - if self.is_volume_form[0]: + if self.pforms[0] == "vol": f_init /= self.domain.jacobian_det(self.positions) - if self.is_volume_form[1]: + if self.pforms[1] == "vol": f_init /= self.f_init.velocity_jacobian_det( *self.f_jacobian_coords.T, ) @@ -1837,13 +1804,13 @@ def initialize_weights( # compute w0 and save at vdim + 5 self.weights0 = f_init / self.sampling_density - if self.reject_weights: - reject = self.markers[:, self.index["w0"]] < self.threshold + if reject_weights: + reject = self.markers[:, self.index["w0"]] < threshold self._markers[reject] = -1.0 self.update_holes() self.reset_marker_ids() print( - f"\nWeights < {self.threshold} have been rejected, number of valid markers on process {self.mpi_rank} is {self.n_mks_loc}.", + f"\nWeights < {threshold} have been rejected, number of valid markers on process {self.mpi_rank} is {self.n_mks_loc}." ) # compute (time-dependent) weights at vdim + 3 @@ -1852,7 +1819,6 @@ def initialize_weights( else: self.weights = self.weights0 - @profile def update_weights(self): """ Applies the control variate method, i.e. updates the time-dependent marker weights @@ -1869,10 +1835,10 @@ def update_weights(self): f0 = self.f0(*self.f_coords.T) # if f_init is vol-form, transform to 0-form - if self.is_volume_form[0]: + if self.pforms[0] == "vol": f0 /= self.domain.jacobian_det(self.positions) - if self.is_volume_form[1]: + if self.pforms[1] == "vol": f0 /= self.f0.velocity_jacobian_det(*self.f_jacobian_coords.T) self.weights = self.weights0 - f0 / self.sampling_density @@ -1880,29 +1846,23 @@ def update_weights(self): def reset_marker_ids(self): """Reset the marker ids (last column in marker array) according to the current distribution of particles. The first marker on rank 0 gets the id '0', the last marker on the last rank gets the id 'n_mks_global - 1'.""" - n_mks_proc_cumsum = xp.cumsum(self.n_mks_on_each_proc) - n_mks_clone_cumsum = xp.cumsum(self.n_mks_on_each_clone) + n_mks_proc_cumsum = np.cumsum(self.n_mks_on_each_proc) + n_mks_clone_cumsum = np.cumsum(self.n_mks_on_each_clone) first_marker_id = (n_mks_clone_cumsum - self.n_mks_on_each_clone)[self.clone_id] + ( n_mks_proc_cumsum - self.n_mks_on_each_proc )[self.mpi_rank] - self.marker_ids = first_marker_id + xp.arange(self.n_mks_loc, dtype=int) + self.marker_ids = first_marker_id + np.arange(self.n_mks_loc, dtype=int) - @profile - def binning( - self, - components: tuple[bool], - bin_edges: tuple[xp.ndarray], - divide_by_jac: bool = True, - ): + def binning(self, components, bin_edges, divide_by_jac=True): r"""Computes full-f and delta-f distribution functions via marker binning in logical space. Numpy's histogramdd is used, following the algorithm outlined in :ref:`binning`. Parameters ---------- - components : tuple[bool] + components : list[bool] List of length 3 + vdim; an entry is True if the direction in phase space is to be binned. - bin_edges : tuple[array] + bin_edges : list[array] List of bin edges (resolution) having the length of True entries in components. divide_by_jac : boll @@ -1917,7 +1877,7 @@ def binning( The reconstructed delta-f distribution function. 
""" - assert xp.count_nonzero(components) == len(bin_edges) + assert np.count_nonzero(components) == len(bin_edges) # volume of a bin bin_vol = 1.0 @@ -1939,13 +1899,13 @@ def binning( _weights0 /= self.domain.jacobian_det(self.positions, remove_outside=False) # _weights0 /= self.velocity_jacobian_det(*self.phasespace_coords.T) - f_slice = xp.histogramdd( + f_slice = np.histogramdd( self.markers_wo_holes_and_ghost[:, slicing], bins=bin_edges, weights=_weights0, )[0] - df_slice = xp.histogramdd( + df_slice = np.histogramdd( self.markers_wo_holes_and_ghost[:, slicing], bins=bin_edges, weights=_weights, @@ -1972,7 +1932,7 @@ def show_distribution_function(self, components, bin_edges): import matplotlib.pyplot as plt - n_dim = xp.count_nonzero(components) + n_dim = np.count_nonzero(components) assert n_dim == 1 or n_dim == 2, f"Distribution function can only be shown in 1D or 2D slices, not {n_dim}." @@ -1988,7 +1948,7 @@ def show_distribution_function(self, components, bin_edges): 4: "$v_2$", 5: "$v_3$", } - indices = xp.nonzero(components)[0] + indices = np.nonzero(components)[0] if n_dim == 1: plt.plot(bin_centers[0], f_slice) @@ -2012,17 +1972,16 @@ def _find_outside_particles(self, axis): self._is_outside_left[self.holes] = False self._is_outside_left[self.ghost_particles] = False - self._is_outside[:] = xp.logical_or( + self._is_outside[:] = np.logical_or( self._is_outside_right, self._is_outside_left, ) # indices or particles that are outside of the logical unit cube - outside_inds = xp.nonzero(self._is_outside)[0] + outside_inds = np.nonzero(self._is_outside)[0] return outside_inds - @profile def apply_kinetic_bc(self, newton=False): """ Apply boundary conditions to markers that are outside of the logical unit cube. @@ -2045,7 +2004,7 @@ def apply_kinetic_bc(self, newton=False): self.particle_refilling() self._markers[self._is_outside, :-1] = -1.0 - self._n_lost_markers += len(xp.nonzero(self._is_outside)[0]) + self._n_lost_markers += len(np.nonzero(self._is_outside)[0]) for axis in self._periodic_axes: outside_inds = self._find_outside_particles(axis) @@ -2056,8 +2015,8 @@ def apply_kinetic_bc(self, newton=False): self.markers[outside_inds, axis] = self.markers[outside_inds, axis] % 1.0 # set shift for alpha-weighted mid-point computation - outside_right_inds = xp.nonzero(self._is_outside_right)[0] - outside_left_inds = xp.nonzero(self._is_outside_left)[0] + outside_right_inds = np.nonzero(self._is_outside_right)[0] + outside_left_inds = np.nonzero(self._is_outside_left)[0] if newton: self.markers[ outside_right_inds, @@ -2125,12 +2084,12 @@ def particle_refilling(self): for kind in self.bc_refill: # sorting out particles which are out of the domain if kind == "inner": - outside_inds = xp.nonzero(self._is_outside_left)[0] + outside_inds = np.nonzero(self._is_outside_left)[0] self.markers[outside_inds, 0] = 1e-4 r_loss = self.domain.params["a1"] else: - outside_inds = xp.nonzero(self._is_outside_right)[0] + outside_inds = np.nonzero(self._is_outside_right)[0] self.markers[outside_inds, 0] = 1 - 1e-4 r_loss = 1.0 @@ -2179,12 +2138,12 @@ def gyro_transfer(self, outside_inds): Parameters ---------- - outside_inds : xp.array (int) + outside_inds : np.array (int) An array of indices of particles which are outside of the domain. Returns ------- - out : xp.array (bool) + out : np.array (bool) An array of indices of particles where its guiding centers are outside of the domain. 
""" @@ -2201,18 +2160,18 @@ def gyro_transfer(self, outside_inds): b_cart, xyz = self.equil.b_cart(self.markers[outside_inds, :]) # calculate magnetic field amplitude and normalized magnetic field - absB0 = xp.sqrt(b_cart[0] ** 2 + b_cart[1] ** 2 + b_cart[2] ** 2) + absB0 = np.sqrt(b_cart[0] ** 2 + b_cart[1] ** 2 + b_cart[2] ** 2) norm_b_cart = b_cart / absB0 # calculate parallel and perpendicular velocities - v_parallel = xp.einsum("ij,ij->j", v, norm_b_cart) - v_perp = xp.cross(norm_b_cart, xp.cross(v, norm_b_cart, axis=0), axis=0) - v_perp_square = xp.sqrt(v_perp[0] ** 2 + v_perp[1] ** 2 + v_perp[2] ** 2) + v_parallel = np.einsum("ij,ij->j", v, norm_b_cart) + v_perp = np.cross(norm_b_cart, np.cross(v, norm_b_cart, axis=0), axis=0) + v_perp_square = np.sqrt(v_perp[0] ** 2 + v_perp[1] ** 2 + v_perp[2] ** 2) - assert xp.all(xp.isclose(v_perp, v - norm_b_cart * v_parallel)) + assert np.all(np.isclose(v_perp, v - norm_b_cart * v_parallel)) # calculate Larmor radius - Larmor_r = xp.cross(norm_b_cart, v_perp, axis=0) / absB0 * self._epsilon + Larmor_r = np.cross(norm_b_cart, v_perp, axis=0) / absB0 * self._epsilon # transform cartesian coordinates to logical coordinates # TODO: currently only possible with the geomoetry where its inverse map is defined. @@ -2231,17 +2190,17 @@ def gyro_transfer(self, outside_inds): b_cart = self.equil.b_cart(self.markers[outside_inds, :])[0] # calculate magnetic field amplitude and normalized magnetic field - absB0 = xp.sqrt(b_cart[0] ** 2 + b_cart[1] ** 2 + b_cart[2] ** 2) + absB0 = np.sqrt(b_cart[0] ** 2 + b_cart[1] ** 2 + b_cart[2] ** 2) norm_b_cart = b_cart / absB0 Larmor_r = new_xyz - xyz - Larmor_r /= xp.sqrt(Larmor_r[0] ** 2 + Larmor_r[1] ** 2 + Larmor_r[2] ** 2) + Larmor_r /= np.sqrt(Larmor_r[0] ** 2 + Larmor_r[1] ** 2 + Larmor_r[2] ** 2) - new_v_perp = xp.cross(Larmor_r, norm_b_cart, axis=0) * v_perp_square + new_v_perp = np.cross(Larmor_r, norm_b_cart, axis=0) * v_perp_square self.markers[outside_inds, 3:6] = (norm_b_cart * v_parallel).T + new_v_perp.T - return xp.logical_and(1.0 > gc_etas[0], gc_etas[0] > 0.0) + return np.logical_and(1.0 > gc_etas[0], gc_etas[0] > 0.0) class SortingBoxes: """Boxes used for the sorting of the particles. @@ -2424,26 +2383,27 @@ def _set_boxes(self): n_particles = self._markers_shape[0] n_mkr = int(n_particles / n_box_in) + 1 n_cols = round( - n_mkr * (1 + 1 / xp.sqrt(n_mkr) + self._box_bufsize), + n_mkr * (1 + 1 / np.sqrt(n_mkr) + self._box_bufsize), ) # cartesian boxes - self._boxes = xp.zeros((self._n_boxes + 1, n_cols), dtype=int) + self._boxes = np.zeros((self._n_boxes + 1, n_cols), dtype=int) # TODO: there is still a bug here # the row number in self._boxes should not be n_boxes + 1; this is just a temporary fix to avoid an error that I dont understand. # Must be fixed soon! 
-            self._next_index = xp.zeros((self._n_boxes + 1), dtype=int)
-            self._cumul_next_index = xp.zeros((self._n_boxes + 2), dtype=int)
-            self._neighbours = xp.zeros((self._n_boxes, 27), dtype=int)
+            self._next_index = np.zeros((self._n_boxes + 1), dtype=int)
+            self._cumul_next_index = np.zeros((self._n_boxes + 2), dtype=int)
+            self._neighbours = np.zeros((self._n_boxes, 27), dtype=int)

             # A particle on box i only sees particles in boxes that belong to neighbours[i]
             initialize_neighbours(self._neighbours, self.nx, self.ny, self.nz)

-            # print(f"{self._rank = }\n{self._neighbours = }")
+            if self._verbose:
+                print(f"{self._rank = }\n{self._neighbours = }")

-            self._swap_line_1 = xp.zeros(self._markers_shape[1])
-            self._swap_line_2 = xp.zeros(self._markers_shape[1])
+            self._swap_line_1 = np.zeros(self._markers_shape[1])
+            self._swap_line_2 = np.zeros(self._markers_shape[1])

         def _set_boundary_boxes(self):
             """Gather all the boxes that are part of a boundary"""
@@ -2464,7 +2424,7 @@ def _set_boundary_boxes(self):
                         self._bnd_boxes_x_p.append(flatten_index(self.nx, j, k, self.nx, self.ny, self.nz))

             if self._verbose:
-                print(f"eta1 boundary on {self._rank =}:\n{self._bnd_boxes_x_m =}\n{self._bnd_boxes_x_p =}")
+                print(f"eta1 boundary on {self._rank = }:\n{self._bnd_boxes_x_m = }\n{self._bnd_boxes_x_p = }")

             # y boundary
             # negative direction
@@ -2479,7 +2439,7 @@ def _set_boundary_boxes(self):
                         self._bnd_boxes_y_p.append(flatten_index(i, self.ny, k, self.nx, self.ny, self.nz))

             if self._verbose:
-                print(f"eta2 boundary on {self._rank =}:\n{self._bnd_boxes_y_m =}\n{self._bnd_boxes_y_p =}")
+                print(f"eta2 boundary on {self._rank = }:\n{self._bnd_boxes_y_m = }\n{self._bnd_boxes_y_p = }")

             # z boundary
             # negative direction
@@ -2494,7 +2454,7 @@ def _set_boundary_boxes(self):
                         self._bnd_boxes_z_p.append(flatten_index(i, j, self.nz, self.nx, self.ny, self.nz))

             if self._verbose:
-                print(f"eta3 boundary on {self._rank =}:\n{self._bnd_boxes_z_m =}\n{self._bnd_boxes_z_p =}")
+                print(f"eta3 boundary on {self._rank = }:\n{self._bnd_boxes_z_m = }\n{self._bnd_boxes_z_p = }")

             # x-y edges
             self._bnd_boxes_x_m_y_m = []
@@ -2512,11 +2472,11 @@ def _set_boundary_boxes(self):
             if self._verbose:
                 print(
                     (
-                        f"eta1-eta2 edge on {self._rank =}:\n{self._bnd_boxes_x_m_y_m =}"
-                        f"\n{self._bnd_boxes_x_m_y_p =}"
-                        f"\n{self._bnd_boxes_x_p_y_m =}"
-                        f"\n{self._bnd_boxes_x_p_y_p =}"
-                    ),
+                        f"eta1-eta2 edge on {self._rank = }:\n{self._bnd_boxes_x_m_y_m = }"
+                        f"\n{self._bnd_boxes_x_m_y_p = }"
+                        f"\n{self._bnd_boxes_x_p_y_m = }"
+                        f"\n{self._bnd_boxes_x_p_y_p = }"
+                    )
                 )

             # x-z edges
@@ -2535,11 +2495,11 @@ def _set_boundary_boxes(self):
             if self._verbose:
                 print(
                     (
-                        f"eta1-eta3 edge on {self._rank =}:\n{self._bnd_boxes_x_m_z_m =}"
-                        f"\n{self._bnd_boxes_x_m_z_p =}"
-                        f"\n{self._bnd_boxes_x_p_z_m =}"
-                        f"\n{self._bnd_boxes_x_p_z_p =}"
-                    ),
+                        f"eta1-eta3 edge on {self._rank = }:\n{self._bnd_boxes_x_m_z_m = }"
+                        f"\n{self._bnd_boxes_x_m_z_p = }"
+                        f"\n{self._bnd_boxes_x_p_z_m = }"
+                        f"\n{self._bnd_boxes_x_p_z_p = }"
+                    )
                 )

             # y-z edges
@@ -2558,11 +2518,11 @@ def _set_boundary_boxes(self):
             if self._verbose:
                 print(
                     (
-                        f"eta2-eta3 edge on {self._rank =}:\n{self._bnd_boxes_y_m_z_m =}"
-                        f"\n{self._bnd_boxes_y_m_z_p =}"
-                        f"\n{self._bnd_boxes_y_p_z_m =}"
-                        f"\n{self._bnd_boxes_y_p_z_p =}"
-                    ),
+                        f"eta2-eta3 edge on {self._rank = }:\n{self._bnd_boxes_y_m_z_m = }"
+                        f"\n{self._bnd_boxes_y_m_z_p = }"
+                        f"\n{self._bnd_boxes_y_p_z_m = }"
+                        f"\n{self._bnd_boxes_y_p_z_p = }"
+                    )
                 )

             # corners
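`flatten_index` is called with indices up to `nx`, `ny`, `nz` inclusive, which suggests one ghost layer of boxes per side. A plausible row-major implementation is sketched below purely for orientation; the actual helper is a compiled kernel elsewhere in struphy and its exact convention may differ:

```python
# Illustrative only: row-major box flattening with one ghost layer per side,
# so valid indices run from -1 to n in each direction.
def flatten_index(i: int, j: int, k: int, nx: int, ny: int, nz: int) -> int:
    return (i + 1) + (j + 1) * (nx + 2) + (k + 1) * (nx + 2) * (ny + 2)

# boxes on the eta1 = 1 face of a 4 x 3 x 2 box grid
face = [flatten_index(4, j, k, 4, 3, 2) for j in range(3) for k in range(2)]
print(face)
```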
@@ -2588,15 +2548,15 @@ def _set_boundary_boxes(self):
             if self._verbose:
                 print(
                     (
-                        f"corners on {self._rank =}:\n{self._bnd_boxes_x_m_y_m_z_m =}"
-                        f"\n{self._bnd_boxes_x_m_y_m_z_p =}"
-                        f"\n{self._bnd_boxes_x_m_y_p_z_m =}"
-                        f"\n{self._bnd_boxes_x_p_y_m_z_m =}"
-                        f"\n{self._bnd_boxes_x_m_y_p_z_p =}"
-                        f"\n{self._bnd_boxes_x_p_y_m_z_p =}"
-                        f"\n{self._bnd_boxes_x_p_y_p_z_m =}"
-                        f"\n{self._bnd_boxes_x_p_y_p_z_p =}"
-                    ),
+                        f"corners on {self._rank = }:\n{self._bnd_boxes_x_m_y_m_z_m = }"
+                        f"\n{self._bnd_boxes_x_m_y_m_z_p = }"
+                        f"\n{self._bnd_boxes_x_m_y_p_z_m = }"
+                        f"\n{self._bnd_boxes_x_p_y_m_z_m = }"
+                        f"\n{self._bnd_boxes_x_m_y_p_z_p = }"
+                        f"\n{self._bnd_boxes_x_p_y_m_z_p = }"
+                        f"\n{self._bnd_boxes_x_p_y_p_z_m = }"
+                        f"\n{self._bnd_boxes_x_p_y_p_z_p = }"
+                    )
                 )

     def _sort_boxed_particles_numpy(self):
@@ -2604,12 +2564,11 @@ def _sort_boxed_particles_numpy(self):
         sorting_axis = self._sorting_boxes.box_index

         if not hasattr(self, "_argsort_array"):
-            self._argsort_array = xp.zeros(self.markers.shape[0], dtype=int)
+            self._argsort_array = np.zeros(self.markers.shape[0], dtype=int)
         self._argsort_array[:] = self._markers[:, sorting_axis].argsort()

         self._markers[:, :] = self._markers[self._argsort_array]

-    @profile
     def put_particles_in_boxes(self):
         """Assign the right box to the particles and the list of the particles to each box.
         If sorting_boxes was instantiated with an MPI comm, then the particles in the
@@ -2632,25 +2591,25 @@ def put_particles_in_boxes(self):
         self.check_and_assign_particles_to_boxes()
         self.update_ghost_particles()

-        # if self.verbose:
-        #     valid_box_ids = xp.nonzero(self._sorting_boxes._boxes[:, 0] != -1)[0]
-        #     print(f"Boxes holding at least one particle: {valid_box_ids}")
-        #     for i in valid_box_ids:
-        #         n_mks_box = xp.count_nonzero(self._sorting_boxes._boxes[i] != -1)
-        #         print(f"Number of markers in box {i} is {n_mks_box}")
+        if self.verbose:
+            valid_box_ids = np.nonzero(self._sorting_boxes._boxes[:, 0] != -1)[0]
+            print(f"Boxes holding at least one particle: {valid_box_ids}")
+            for i in valid_box_ids:
+                n_mks_box = np.count_nonzero(self._sorting_boxes._boxes[i] != -1)
+                print(f"Number of markers in box {i} is {n_mks_box}")

     def check_and_assign_particles_to_boxes(self):
         """Check whether the box array has enough columns (detect load imbalance
         with respect to sorting boxes), and then assign the particles to boxes."""
-        bcount = xp.bincount(xp.int64(self.markers_wo_holes[:, -2]))
-        max_in_box = xp.max(bcount)
+        bcount = np.bincount(np.int64(self.markers_wo_holes[:, -2]))
+        max_in_box = np.max(bcount)
         if max_in_box > self._sorting_boxes.boxes.shape[1]:
             warnings.warn(
                 f'Strong load imbalance detected in sorting boxes: \
max number of markers in a box ({max_in_box}) on rank {self.mpi_rank} \
exceeds the column-size of the box array ({self._sorting_boxes.boxes.shape[1]}). \
-Increasing the value of "box_bufsize" in the markers parameters for the next run.',
+Increasing the value of "box_bufsize" in the markers parameters for the next run.'
             )
             self.mpi_comm.Abort()
@@ -2661,7 +2620,6 @@ def check_and_assign_particles_to_boxes(self):
                 self._sorting_boxes._next_index,
             )

-    @profile
     def do_sort(self, use_numpy_argsort=False):
         """Assign the particles to boxes and then sort them."""
         nx = self._sorting_boxes.nx
@@ -2688,7 +2646,7 @@ def do_sort(self, use_numpy_argsort=False):

     def remove_ghost_particles(self):
         self.update_ghost_particles()
-        new_holes = xp.nonzero(self.ghost_particles)
+        new_holes = np.nonzero(self.ghost_particles)
         self._markers[new_holes] = -1.0
         self.update_holes()
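`_sort_boxed_particles_numpy` above is a one-pass reorder of the marker array by its box-index column; the same idea in isolation (toy data, the last-but-one column playing the role of the box index):

```python
import numpy as np

rng = np.random.default_rng(1)
markers = rng.random((8, 5))
markers[:, -2] = rng.integers(0, 4, size=8)  # toy box index column

# sort the whole marker array so rows with the same box index are contiguous
order = markers[:, -2].argsort()
markers[:, :] = markers[order]
print(markers[:, -2])  # non-decreasing box indices
```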
@@ -2702,8 +2660,8 @@ def prepare_ghost_particles(self):
         4. optional: mirror position for boundary conditions
         """
         shifts = self.sorting_boxes.bc_sph_index_shifts
-        # if self.verbose:
-        #     print(f"{self.sorting_boxes.bc_sph_index_shifts = }")
+        if self.verbose:
+            print(f"{self.sorting_boxes.bc_sph_index_shifts = }")

         ## Faces
@@ -2734,23 +2692,17 @@ def prepare_ghost_particles(self):
         # Mirror position for boundary condition
         if self.bc_sph[0] in ("mirror", "fixed"):
             self._mirror_particles(
-                "_markers_x_m",
-                "_markers_x_p",
-                is_domain_boundary=self.sorting_boxes.is_domain_boundary,
+                "_markers_x_m", "_markers_x_p", is_domain_boundary=self.sorting_boxes.is_domain_boundary
             )

         if self.bc_sph[1] in ("mirror", "fixed"):
             self._mirror_particles(
-                "_markers_y_m",
-                "_markers_y_p",
-                is_domain_boundary=self.sorting_boxes.is_domain_boundary,
+                "_markers_y_m", "_markers_y_p", is_domain_boundary=self.sorting_boxes.is_domain_boundary
             )

         if self.bc_sph[2] in ("mirror", "fixed"):
             self._mirror_particles(
-                "_markers_z_m",
-                "_markers_z_p",
-                is_domain_boundary=self.sorting_boxes.is_domain_boundary,
+                "_markers_z_m", "_markers_z_p", is_domain_boundary=self.sorting_boxes.is_domain_boundary
             )

         ## Edges x-y
@@ -2905,8 +2857,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 0] *= -1.0
             if self.bc_sph[0] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2918,8 +2869,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 0] = 2.0 - arr[:, 0]
             if self.bc_sph[0] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2934,8 +2884,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 1] *= -1.0
             if self.bc_sph[1] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2947,8 +2896,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 1] = 2.0 - arr[:, 1]
             if self.bc_sph[1] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2963,8 +2911,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 2] *= -1.0
             if self.bc_sph[2] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2976,8 +2923,7 @@ def _mirror_particles(self, *marker_array_names, is_domain_boundary=None):
             arr[:, 2] = 2.0 - arr[:, 2]
             if self.bc_sph[2] == "fixed" and arr_name not in self._fixed_markers_set:
                 boundary_values = self.f_init(
-                    *arr[:, :3].T,
-                    flat_eval=True,
+                    *arr[:, :3].T, flat_eval=True
                 )  # evaluation outside of the unit cube - maybe not working for all f_init!
                 arr[:, self.index["weights"]] = -boundary_values / self.s0(
                     *arr[:, :3].T,
@@ -2992,162 +2938,162 @@ def determine_markers_in_box(self, list_boxes):
         for i in list_boxes:
             indices += list(self._sorting_boxes._boxes[i][self._sorting_boxes._boxes[i] != -1])

-        indices = xp.array(indices, dtype=int)
+        indices = np.array(indices, dtype=int)
         markers_in_box = self.markers[indices]

         return markers_in_box

     def get_destinations_box(self):
         """Find the destination proc for the particles to communicate for the box structure."""
-        self._send_info_box = xp.zeros(self.mpi_size, dtype=int)
-        self._send_list_box = [xp.zeros((0, self.n_cols))] * self.mpi_size
+        self._send_info_box = np.zeros(self.mpi_size, dtype=int)
+        self._send_list_box = [np.zeros((0, self.n_cols))] * self.mpi_size

         # Faces
         # if self._x_m_proc is not None:
         self._send_info_box[self._x_m_proc] += len(self._markers_x_m)
-        self._send_list_box[self._x_m_proc] = xp.concatenate((self._send_list_box[self._x_m_proc], self._markers_x_m))
+        self._send_list_box[self._x_m_proc] = np.concatenate((self._send_list_box[self._x_m_proc], self._markers_x_m))

         # if self._x_p_proc is not None:
         self._send_info_box[self._x_p_proc] += len(self._markers_x_p)
-        self._send_list_box[self._x_p_proc] = xp.concatenate((self._send_list_box[self._x_p_proc], self._markers_x_p))
+        self._send_list_box[self._x_p_proc] = np.concatenate((self._send_list_box[self._x_p_proc], self._markers_x_p))

         # if self._y_m_proc is not None:
         self._send_info_box[self._y_m_proc] += len(self._markers_y_m)
-        self._send_list_box[self._y_m_proc] = xp.concatenate((self._send_list_box[self._y_m_proc], self._markers_y_m))
+        self._send_list_box[self._y_m_proc] = np.concatenate((self._send_list_box[self._y_m_proc], self._markers_y_m))

         # if self._y_p_proc is not None:
         self._send_info_box[self._y_p_proc] += len(self._markers_y_p)
-        self._send_list_box[self._y_p_proc] = xp.concatenate((self._send_list_box[self._y_p_proc], self._markers_y_p))
+        self._send_list_box[self._y_p_proc] = np.concatenate((self._send_list_box[self._y_p_proc], self._markers_y_p))

         # if self._z_m_proc is not None:
         self._send_info_box[self._z_m_proc] += len(self._markers_z_m)
-        self._send_list_box[self._z_m_proc] = xp.concatenate((self._send_list_box[self._z_m_proc], self._markers_z_m))
+        self._send_list_box[self._z_m_proc] = np.concatenate((self._send_list_box[self._z_m_proc], self._markers_z_m))

         # if self._z_p_proc is not None:
         self._send_info_box[self._z_p_proc] += len(self._markers_z_p)
-        self._send_list_box[self._z_p_proc] = xp.concatenate((self._send_list_box[self._z_p_proc], self._markers_z_p))
+        self._send_list_box[self._z_p_proc] = np.concatenate((self._send_list_box[self._z_p_proc], self._markers_z_p))

         # x-y edges
         # if self._x_m_y_m_proc is not None:
         self._send_info_box[self._x_m_y_m_proc] += len(self._markers_x_m_y_m)
-        self._send_list_box[self._x_m_y_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_m_proc], self._markers_x_m_y_m),
+        self._send_list_box[self._x_m_y_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_m_proc], self._markers_x_m_y_m)
         )

         # if self._x_m_y_p_proc is not None:
         self._send_info_box[self._x_m_y_p_proc] += len(self._markers_x_m_y_p)
-        self._send_list_box[self._x_m_y_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_p_proc], self._markers_x_m_y_p),
+        self._send_list_box[self._x_m_y_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_p_proc], self._markers_x_m_y_p)
         )

         # if self._x_p_y_m_proc is not None:
         self._send_info_box[self._x_p_y_m_proc] += len(self._markers_x_p_y_m)
-        self._send_list_box[self._x_p_y_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_m_proc], self._markers_x_p_y_m),
+        self._send_list_box[self._x_p_y_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_m_proc], self._markers_x_p_y_m)
        )

         # if self._x_p_y_p_proc is not None:
         self._send_info_box[self._x_p_y_p_proc] += len(self._markers_x_p_y_p)
-        self._send_list_box[self._x_p_y_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_p_proc], self._markers_x_p_y_p),
+        self._send_list_box[self._x_p_y_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_p_proc], self._markers_x_p_y_p)
        )

         # x-z edges
         # if self._x_m_z_m_proc is not None:
         self._send_info_box[self._x_m_z_m_proc] += len(self._markers_x_m_z_m)
-        self._send_list_box[self._x_m_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_z_m_proc], self._markers_x_m_z_m),
+        self._send_list_box[self._x_m_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_z_m_proc], self._markers_x_m_z_m)
        )

         # if self._x_m_z_p_proc is not None:
         self._send_info_box[self._x_m_z_p_proc] += len(self._markers_x_m_z_p)
-        self._send_list_box[self._x_m_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_z_p_proc], self._markers_x_m_z_p),
+        self._send_list_box[self._x_m_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_z_p_proc], self._markers_x_m_z_p)
        )

         # if self._x_p_z_m_proc is not None:
         self._send_info_box[self._x_p_z_m_proc] += len(self._markers_x_p_z_m)
-        self._send_list_box[self._x_p_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_z_m_proc], self._markers_x_p_z_m),
+        self._send_list_box[self._x_p_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_z_m_proc], self._markers_x_p_z_m)
        )

         # if self._x_p_z_p_proc is not None:
         self._send_info_box[self._x_p_z_p_proc] += len(self._markers_x_p_z_p)
-        self._send_list_box[self._x_p_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_z_p_proc], self._markers_x_p_z_p),
+        self._send_list_box[self._x_p_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_z_p_proc], self._markers_x_p_z_p)
        )

         # y-z edges
         # if self._y_m_z_m_proc is not None:
         self._send_info_box[self._y_m_z_m_proc] += len(self._markers_y_m_z_m)
-        self._send_list_box[self._y_m_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._y_m_z_m_proc], self._markers_y_m_z_m),
+        self._send_list_box[self._y_m_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._y_m_z_m_proc], self._markers_y_m_z_m)
        )

         # if self._y_m_z_p_proc is not None:
         self._send_info_box[self._y_m_z_p_proc] += len(self._markers_y_m_z_p)
-        self._send_list_box[self._y_m_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._y_m_z_p_proc], self._markers_y_m_z_p),
+        self._send_list_box[self._y_m_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._y_m_z_p_proc], self._markers_y_m_z_p)
        )
         # if self._y_p_z_m_proc is not None:
         self._send_info_box[self._y_p_z_m_proc] += len(self._markers_y_p_z_m)
-        self._send_list_box[self._y_p_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._y_p_z_m_proc], self._markers_y_p_z_m),
+        self._send_list_box[self._y_p_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._y_p_z_m_proc], self._markers_y_p_z_m)
        )

         # if self._y_p_z_p_proc is not None:
         self._send_info_box[self._y_p_z_p_proc] += len(self._markers_y_p_z_p)
-        self._send_list_box[self._y_p_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._y_p_z_p_proc], self._markers_y_p_z_p),
+        self._send_list_box[self._y_p_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._y_p_z_p_proc], self._markers_y_p_z_p)
        )

         # corners
         # if self._x_m_y_m_z_m_proc is not None:
         self._send_info_box[self._x_m_y_m_z_m_proc] += len(self._markers_x_m_y_m_z_m)
-        self._send_list_box[self._x_m_y_m_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_m_z_m_proc], self._markers_x_m_y_m_z_m),
+        self._send_list_box[self._x_m_y_m_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_m_z_m_proc], self._markers_x_m_y_m_z_m)
        )

         # if self._x_m_y_m_z_p_proc is not None:
         self._send_info_box[self._x_m_y_m_z_p_proc] += len(self._markers_x_m_y_m_z_p)
-        self._send_list_box[self._x_m_y_m_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_m_z_p_proc], self._markers_x_m_y_m_z_p),
+        self._send_list_box[self._x_m_y_m_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_m_z_p_proc], self._markers_x_m_y_m_z_p)
        )

         # if self._x_m_y_p_z_m_proc is not None:
         self._send_info_box[self._x_m_y_p_z_m_proc] += len(self._markers_x_m_y_p_z_m)
-        self._send_list_box[self._x_m_y_p_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_p_z_m_proc], self._markers_x_m_y_p_z_m),
+        self._send_list_box[self._x_m_y_p_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_p_z_m_proc], self._markers_x_m_y_p_z_m)
        )

         # if self._x_m_y_p_z_p_proc is not None:
         self._send_info_box[self._x_m_y_p_z_p_proc] += len(self._markers_x_m_y_p_z_p)
-        self._send_list_box[self._x_m_y_p_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_m_y_p_z_p_proc], self._markers_x_m_y_p_z_p),
+        self._send_list_box[self._x_m_y_p_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_m_y_p_z_p_proc], self._markers_x_m_y_p_z_p)
        )

         # if self._x_p_y_m_z_m_proc is not None:
         self._send_info_box[self._x_p_y_m_z_m_proc] += len(self._markers_x_p_y_m_z_m)
-        self._send_list_box[self._x_p_y_m_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_m_z_m_proc], self._markers_x_p_y_m_z_m),
+        self._send_list_box[self._x_p_y_m_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_m_z_m_proc], self._markers_x_p_y_m_z_m)
        )

         # if self._x_p_y_m_z_p_proc is not None:
         self._send_info_box[self._x_p_y_m_z_p_proc] += len(self._markers_x_p_y_m_z_p)
-        self._send_list_box[self._x_p_y_m_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_m_z_p_proc], self._markers_x_p_y_m_z_p),
+        self._send_list_box[self._x_p_y_m_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_m_z_p_proc], self._markers_x_p_y_m_z_p)
        )

         # if self._x_p_y_p_z_m_proc is not None:
         self._send_info_box[self._x_p_y_p_z_m_proc] += len(self._markers_x_p_y_p_z_m)
-        self._send_list_box[self._x_p_y_p_z_m_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_p_z_m_proc], self._markers_x_p_y_p_z_m),
+        self._send_list_box[self._x_p_y_p_z_m_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_p_z_m_proc], self._markers_x_p_y_p_z_m)
        )

         # if self._x_p_y_p_z_p_proc is not None:
         self._send_info_box[self._x_p_y_p_z_p_proc] += len(self._markers_x_p_y_p_z_p)
-        self._send_list_box[self._x_p_y_p_z_p_proc] = xp.concatenate(
-            (self._send_list_box[self._x_p_y_p_z_p_proc], self._markers_x_p_y_p_z_p),
+        self._send_list_box[self._x_p_y_p_z_p_proc] = np.concatenate(
+            (self._send_list_box[self._x_p_y_p_z_p_proc], self._markers_x_p_y_p_z_p)
        )

     def self_communication_boxes(self):
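All of this bookkeeping feeds a counts-first exchange: ranks swap row counts with `Alltoall`, then ship the marker rows with `Isend`/`Irecv`. A minimal mpi4py sketch of that pattern, with illustrative array sizes (run with e.g. `mpirun -n 2 python sketch.py`):

```python
# Sketch of the counts-first particle exchange used below (illustrative sizes).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()

send_info = np.array([2] * size, dtype=int)                    # rows per destination
send_list = [np.full((2, 4), rank, dtype=float) for _ in range(size)]

recv_info = np.zeros(size, dtype=int)
comm.Alltoall(send_info, recv_info)                            # counts exchanged first

reqs, bufs = [], []
for i in range(size):
    if i == rank:
        bufs.append(send_list[i].copy())                       # self-communication: copy
        reqs.append(None)
        continue
    comm.Isend(send_list[i], dest=i, tag=rank)
    bufs.append(np.zeros((recv_info[i], 4)))
    reqs.append(comm.Irecv(bufs[-1], source=i, tag=i))

for req in reqs:
    if req is not None:
        req.Wait()
print(rank, [b.shape for b in bufs])
```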
@@ -3156,14 +3102,14 @@ def self_communication_boxes(self):
         if self._send_info_box[self.mpi_rank] > 0:
             self.update_holes()

-            holes_inds = xp.nonzero(self.holes)[0]
+            holes_inds = np.nonzero(self.holes)[0]

             if holes_inds.size < self._send_info_box[self.mpi_rank]:
                 warnings.warn(
                     f'Strong load imbalance detected: \
number of holes ({holes_inds.size}) on rank {self.mpi_rank} \
is smaller than number of incoming particles ({self._send_info_box[self.mpi_rank]}). \
-Increasing the value of "bufsize" in the markers parameters for the next run.',
+Increasing the value of "bufsize" in the markers parameters for the next run.'
                 )
                 self.mpi_comm.Abort()
@@ -3178,17 +3124,16 @@ def self_communication_boxes(self):
             # self.update_holes()
             # self.update_ghost_particles()
             # self.update_valid_mks()
-            # holes_inds = xp.nonzero(self.holes)[0]
+            # holes_inds = np.nonzero(self.holes)[0]

-            self.markers[holes_inds[xp.arange(self._send_info_box[self.mpi_rank])]] = self._send_list_box[self.mpi_rank]
+            self.markers[holes_inds[np.arange(self._send_info_box[self.mpi_rank])]] = self._send_list_box[self.mpi_rank]

-    @profile
     def communicate_boxes(self, verbose=False):
-        # if verbose:
-        #     n_valid = xp.count_nonzero(self.valid_mks)
-        #     n_holes = xp.count_nonzero(self.holes)
-        #     n_ghosts = xp.count_nonzero(self.ghost_particles)
-        #     print(f"before communicate_boxes: {self.mpi_rank = }, {n_valid = } {n_holes = }, {n_ghosts = }")
+        if verbose:
+            n_valid = np.count_nonzero(self.valid_mks)
+            n_holes = np.count_nonzero(self.holes)
+            n_ghosts = np.count_nonzero(self.ghost_particles)
+            print(f"before communicate_boxes: {self.mpi_rank = }, {n_valid = } {n_holes = }, {n_ghosts = }")

         self.prepare_ghost_particles()
         self.get_destinations_box()
@@ -3201,11 +3146,11 @@ def communicate_boxes(self, verbose=False):
         self.update_holes()
         self.update_ghost_particles()

-        # if verbose:
-        #     n_valid = xp.count_nonzero(self.valid_mks)
-        #     n_holes = xp.count_nonzero(self.holes)
-        #     n_ghosts = xp.count_nonzero(self.ghost_particles)
-        #     print(f"after communicate_boxes: {self.mpi_rank = }, {n_valid = }, {n_holes = }, {n_ghosts = }")
+        if verbose:
+            n_valid = np.count_nonzero(self.valid_mks)
+            n_holes = np.count_nonzero(self.holes)
+            n_ghosts = np.count_nonzero(self.ghost_particles)
+            print(f"after communicate_boxes: {self.mpi_rank = }, {n_valid = }, {n_holes = }, {n_ghosts = }")

     def sendrecv_all_to_all_boxes(self):
         """
         Transpose the send_info_box (all-to-all communication)
         for the communication of particles in boundary boxes.
""" - self._recv_info_box = xp.zeros(self.mpi_comm.Get_size(), dtype=int) + self._recv_info_box = np.zeros(self.mpi_comm.Get_size(), dtype=int) self.mpi_comm.Alltoall(self._send_info_box, self._recv_info_box) @@ -3224,8 +3169,8 @@ def sendrecv_markers_boxes(self): """ # i-th entry holds the number (not the index) of the first hole to be filled by data from process i - first_hole = xp.cumsum(self._recv_info_box) - self._recv_info_box - hole_inds = xp.nonzero(self._holes)[0] + first_hole = np.cumsum(self._recv_info_box) - self._recv_info_box + hole_inds = np.nonzero(self._holes)[0] # Initialize send and receive commands reqs = [] recvbufs = [] @@ -3236,7 +3181,7 @@ def sendrecv_markers_boxes(self): else: self.mpi_comm.Isend(data, dest=i, tag=self.mpi_comm.Get_rank()) - recvbufs += [xp.zeros((N_recv, self._markers.shape[1]), dtype=float)] + recvbufs += [np.zeros((N_recv, self._markers.shape[1]), dtype=float)] reqs += [self.mpi_comm.Irecv(recvbufs[-1], source=i, tag=i)] # Wait for buffer, then put markers into holes @@ -3254,12 +3199,12 @@ def sendrecv_markers_boxes(self): f'Strong load imbalance detected: \ number of holes ({hole_inds.size}) on rank {self.mpi_rank} \ is smaller than number of incoming particles ({first_hole[i] + self._recv_info_box[i]}). \ -Increasing the value of "bufsize" in the markers parameters for the next run.', +Increasing the value of "bufsize" in the markers parameters for the next run.' ) self.mpi_comm.Abort() # exit() - self._markers[hole_inds[first_hole[i] + xp.arange(self._recv_info_box[i])]] = recvbufs[i] + self._markers[hole_inds[first_hole[i] + np.arange(self._recv_info_box[i])]] = recvbufs[i] test_reqs.pop() reqs[i] = None @@ -3734,11 +3679,11 @@ def eval_density( def eval_sph( self, - eta1: xp.ndarray, - eta2: xp.ndarray, - eta3: xp.ndarray, + eta1: np.ndarray, + eta2: np.ndarray, + eta3: np.ndarray, index: int, - out: xp.ndarray = None, + out: np.ndarray = None, fast: bool = True, kernel_type: str = "gaussian_1d", derivative: int = "0", @@ -3784,12 +3729,12 @@ def eval_sph( h1, h2, h3 : float Radius of the smoothing kernel in each dimension. """ - _shp = xp.shape(eta1) - assert _shp == xp.shape(eta2) == xp.shape(eta3) + _shp = np.shape(eta1) + assert _shp == np.shape(eta2) == np.shape(eta3) if out is not None: - assert _shp == xp.shape(out) + assert _shp == np.shape(out) else: - out = xp.zeros_like(eta1) + out = np.zeros_like(eta1) assert derivative in {0, 1, 2, 3}, f"derivative must be 0, 1, 2 or 3, but is {derivative}." @@ -3872,7 +3817,7 @@ def update_ghost_particles(self): def sendrecv_determine_mtbs( self, - alpha: list | tuple | xp.ndarray = (1.0, 1.0, 1.0), + alpha: list | tuple | np.ndarray = (1.0, 1.0, 1.0), ): """ Determine which markers have to be sent from current process and put them in a new array. @@ -3894,34 +3839,34 @@ def sendrecv_determine_mtbs( Eta-values of shape (n_send, :) according to which the sorting is performed. 
""" # position that determines the sorting (including periodic shift of boundary conditions) - if not isinstance(alpha, xp.ndarray): - alpha = xp.array(alpha, dtype=float) + if not isinstance(alpha, np.ndarray): + alpha = np.array(alpha, dtype=float) assert alpha.size == 3 - assert xp.all(alpha >= 0.0) and xp.all(alpha <= 1.0) + assert np.all(alpha >= 0.0) and np.all(alpha <= 1.0) bi = self.first_pusher_idx - self._sorting_etas = xp.mod( + self._sorting_etas = np.mod( alpha * (self.markers[:, :3] + self.markers[:, bi + 3 + self.vdim : bi + 3 + self.vdim + 3]) + (1.0 - alpha) * self.markers[:, bi : bi + 3], 1.0, ) # check which particles are on the current process domain - self._is_on_proc_domain = xp.logical_and( + self._is_on_proc_domain = np.logical_and( self._sorting_etas > self.domain_array[self.mpi_rank, 0::3], self._sorting_etas < self.domain_array[self.mpi_rank, 1::3], ) # to stay on the current process, all three columns must be True - self._can_stay = xp.all(self._is_on_proc_domain, axis=1) + self._can_stay = np.all(self._is_on_proc_domain, axis=1) # holes and ghosts can stay, too self._can_stay[self.holes] = True self._can_stay[self.ghost_particles] = True # True values can stay on the process, False must be sent, already empty rows (-1) cannot be sent - send_inds = xp.nonzero(~self._can_stay)[0] + send_inds = np.nonzero(~self._can_stay)[0] - hole_inds_after_send = xp.nonzero(xp.logical_or(~self._can_stay, self.holes))[0] + hole_inds_after_send = np.nonzero(np.logical_or(~self._can_stay, self.holes))[0] return hole_inds_after_send, send_inds @@ -3940,16 +3885,16 @@ def sendrecv_get_destinations(self, send_inds): """ # One entry for each process - send_info = xp.zeros(self.mpi_size, dtype=int) + send_info = np.zeros(self.mpi_size, dtype=int) # TODO: do not loop over all processes, start with neighbours and work outwards (using while) for i in range(self.mpi_size): - conds = xp.logical_and( + conds = np.logical_and( self._sorting_etas[send_inds] > self.domain_array[i, 0::3], self._sorting_etas[send_inds] < self.domain_array[i, 1::3], ) - self._send_to_i[i] = xp.nonzero(xp.all(conds, axis=1))[0] + self._send_to_i[i] = np.nonzero(np.all(conds, axis=1))[0] send_info[i] = self._send_to_i[i].size self._send_list[i] = self.markers[send_inds][self._send_to_i[i]] @@ -3971,7 +3916,7 @@ def sendrecv_all_to_all(self, send_info): Amount of marticles to be received from i-th process. 
""" - recv_info = xp.zeros(self.mpi_size, dtype=int) + recv_info = np.zeros(self.mpi_size, dtype=int) self.mpi_comm.Alltoall(send_info, recv_info) @@ -3991,7 +3936,7 @@ def sendrecv_markers(self, recv_info, hole_inds_after_send): """ # i-th entry holds the number (not the index) of the first hole to be filled by data from process i - first_hole = xp.cumsum(recv_info) - recv_info + first_hole = np.cumsum(recv_info) - recv_info # Initialize send and receive commands for i, (data, N_recv) in enumerate(zip(self._send_list, list(recv_info))): @@ -4001,7 +3946,7 @@ def sendrecv_markers(self, recv_info, hole_inds_after_send): else: self.mpi_comm.Isend(data, dest=i, tag=self.mpi_rank) - self._recvbufs[i] = xp.zeros((N_recv, self.markers.shape[1]), dtype=float) + self._recvbufs[i] = np.zeros((N_recv, self.markers.shape[1]), dtype=float) self._reqs[i] = self.mpi_comm.Irecv(self._recvbufs[i], source=i, tag=i) # Wait for buffer, then put markers into holes @@ -4019,16 +3964,16 @@ def sendrecv_markers(self, recv_info, hole_inds_after_send): f'Strong load imbalance detected: \ number of holes ({hole_inds_after_send.size}) on rank {self.mpi_rank} \ is smaller than number of incoming particles ({first_hole[i] + recv_info[i]}). \ -Increasing the value of "bufsize" in the markers parameters for the next run.', +Increasing the value of "bufsize" in the markers parameters for the next run.' ) self.mpi_comm.Abort() - self.markers[hole_inds_after_send[first_hole[i] + xp.arange(recv_info[i])]] = self._recvbufs[i] + self.markers[hole_inds_after_send[first_hole[i] + np.arange(recv_info[i])]] = self._recvbufs[i] test_reqs.pop() self._reqs[i] = None - def _gather_scalar_in_subcomm_array(self, scalar: int, out: xp.ndarray = None): + def _gather_scalar_in_subcomm_array(self, scalar: int, out: np.ndarray = None): """Return an array of length sub_comm.size, where the i-th entry corresponds to the value of the scalar on process i. @@ -4037,11 +3982,11 @@ def _gather_scalar_in_subcomm_array(self, scalar: int, out: xp.ndarray = None): scalar : int The scalar value on each process. - out : xp.ndarray + out : np.ndarray The returned array (optional). """ if out is None: - _tmp = xp.zeros(self.mpi_size, dtype=int) + _tmp = np.zeros(self.mpi_size, dtype=int) else: assert out.size == self.mpi_size _tmp = out @@ -4056,7 +4001,7 @@ def _gather_scalar_in_subcomm_array(self, scalar: int, out: xp.ndarray = None): return _tmp - def _gather_scalar_in_intercomm_array(self, scalar: int, out: xp.ndarray = None): + def _gather_scalar_in_intercomm_array(self, scalar: int, out: np.ndarray = None): """Return an array of length inter_comm.size, where the i-th entry corresponds to the value of the scalar on clone i. @@ -4065,11 +4010,11 @@ def _gather_scalar_in_intercomm_array(self, scalar: int, out: xp.ndarray = None) scalar : int The scalar value on each clone. - out : xp.ndarray + out : np.ndarray The returned array (optional). """ if out is None: - _tmp = xp.zeros(self.num_clones, dtype=int) + _tmp = np.zeros(self.num_clones, dtype=int) else: assert out.size == self.num_clones _tmp = out @@ -4098,7 +4043,7 @@ class Tesselation: comm : Intracomm MPI communicator. - domain_array : xp.ndarray + domain_array : np.ndarray A 2d array[float] of shape (comm.Get_size(), 9) holding info on the domain decomposition. 
     sorting_boxes : Particles.SortingBoxes
@@ -4110,7 +4055,7 @@ def __init__(
         tiles_pb: int | float,
         *,
         comm: Intracomm = None,
-        domain_array: xp.ndarray = None,
+        domain_array: np.ndarray = None,
         sorting_boxes: Particles.SortingBoxes = None,
     ):
         if isinstance(tiles_pb, int):
@@ -4128,8 +4073,8 @@ def __init__(
             assert domain_array is not None

         if domain_array is None:
-            self._starts = xp.zeros(3)
-            self._ends = xp.ones(3)
+            self._starts = np.zeros(3)
+            self._ends = np.ones(3)
         else:
             self._starts = domain_array[self.rank, 0::3]
             self._ends = domain_array[self.rank, 1::3]
@@ -4152,9 +4097,9 @@ def __init__(
         if n_boxes == 1:
             self._dims_mask = [True] * 3
         else:
-            self._dims_mask = xp.array(self.boxes_per_dim) > 1
+            self._dims_mask = np.array(self.boxes_per_dim) > 1

-        min_tiles = 2 ** xp.count_nonzero(self.dims_mask)
+        min_tiles = 2 ** np.count_nonzero(self.dims_mask)
         assert self.tiles_pb >= min_tiles, (
             f"At least {min_tiles} tiles per sorting box is enforced, but you have {self.tiles_pb}!"
         )
@@ -4177,19 +4122,19 @@ def get_tiles(self):
         # print(f'{self.dims_mask = }')

         # tiles in one sorting box
-        self._nt_per_dim = xp.array([1, 1, 1])
-        _ids = xp.nonzero(self._dims_mask)[0]
+        self._nt_per_dim = np.array([1, 1, 1])
+        _ids = np.nonzero(self._dims_mask)[0]
         for fac in factors_vec:
             _nt = self.nt_per_dim[self._dims_mask]
-            d = _ids[xp.argmin(_nt)]
+            d = _ids[np.argmin(_nt)]
             self._nt_per_dim[d] *= fac
             # print(f'{_nt = }, {d = }, {self.nt_per_dim = }')

-        assert xp.prod(self.nt_per_dim) == self.tiles_pb
+        assert np.prod(self.nt_per_dim) == self.tiles_pb

         # tiles between [0, box_width] in each direction
-        self._tile_breaks = [xp.linspace(0.0, bw, nt + 1) for bw, nt in zip(self.box_widths, self.nt_per_dim)]
-        self._tile_midpoints = [(xp.roll(tbs, -1)[:-1] + tbs[:-1]) / 2 for tbs in self.tile_breaks]
+        self._tile_breaks = [np.linspace(0.0, bw, nt + 1) for bw, nt in zip(self.box_widths, self.nt_per_dim)]
+        self._tile_midpoints = [(np.roll(tbs, -1)[:-1] + tbs[:-1]) / 2 for tbs in self.tile_breaks]
         self._tile_volume = 1.0
         for tb in self.tile_breaks:
             self._tile_volume *= tb[1]
@@ -4197,8 +4142,8 @@ def draw_markers(self):
         """Draw markers on the tile midpoints."""
         _, eta1 = self._tile_output_arrays()
-        eta2 = xp.zeros_like(eta1)
-        eta3 = xp.zeros_like(eta1)
+        eta2 = np.zeros_like(eta1)
+        eta3 = np.zeros_like(eta1)

         nt_x, nt_y, nt_z = self.nt_per_dim
@@ -4209,7 +4154,7 @@ def draw_markers(self):
                 for k in range(self.boxes_per_dim[2]):
                     z_midpoints = self._get_midpoints(k, 2)

-                    xx, yy, zz = xp.meshgrid(
+                    xx, yy, zz = np.meshgrid(
                         x_midpoints,
                         y_midpoints,
                         z_midpoints,
@@ -4246,7 +4191,7 @@ def _get_quad_pts(self, n_quad=None):
         self._tile_quad_pts = []
         self._tile_quad_wts = []
         for nq, tb in zip(n_quad, self.tile_breaks):
-            pts_loc, wts_loc = xp.polynomial.legendre.leggauss(nq)
+            pts_loc, wts_loc = np.polynomial.legendre.leggauss(nq)
             pts, wts = quadrature_grid(tb[:2], pts_loc, wts_loc)
             self._tile_quad_pts += [pts[0]]
             self._tile_quad_wts += [wts[0]]
@@ -4273,7 +4218,7 @@ def cell_averages(self, fun, n_quad=None):
                 for k in range(self.boxes_per_dim[2]):
                     z_pts = self._get_box_quad_pts(k, 2)

-                    xx, yy, zz = xp.meshgrid(
+                    xx, yy, zz = np.meshgrid(
                         x_pts.flatten(),
                         y_pts.flatten(),
                         z_pts.flatten(),
@@ -4302,9 +4247,9 @@ def _tile_output_arrays(self):
         * the first with one entry for each tile on one sorting box
         * the second with one entry for each tile on current process
         """
-        # self._quad_pts = [xp.zeros((nt, nq)).flatten() for nt, nq in zip(self.nt_per_dim, self.tile_quad_pts)]
-        single_box_out = xp.zeros(self.nt_per_dim)
-        out = xp.tile(single_box_out, self.boxes_per_dim)
+        # self._quad_pts = [np.zeros((nt, nq)).flatten() for nt, nq in zip(self.nt_per_dim, self.tile_quad_pts)]
+        single_box_out = np.zeros(self.nt_per_dim)
+        out = np.tile(single_box_out, self.boxes_per_dim)
         return single_box_out, out

     def _get_midpoints(self, i: int, dim: int):
@@ -4325,13 +4270,13 @@ def _get_box_quad_pts(self, i: int, dim: int):

         Returns
         -------
-        x_pts : xp.array
+        x_pts : np.array
             2d array of shape (n_tiles_pb, n_tile_quad_pts)
         """
         xl = self.starts[dim] + i * self.box_widths[dim]
         x_tile_breaks = xl + self.tile_breaks[dim][:-1]
         x_tile_pts = self.tile_quad_pts[dim]
-        x_pts = xp.tile(x_tile_breaks, (x_tile_pts.size, 1)).T + x_tile_pts
+        x_pts = np.tile(x_tile_breaks, (x_tile_pts.size, 1)).T + x_tile_pts

         return x_pts

     @property
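`_get_quad_pts` and `cell_averages` above boil down to mapping Gauss-Legendre nodes onto each tile and averaging; here is a self-contained 1d version of that reduction (function and breakpoints chosen for illustration):

```python
import numpy as np

def cell_averages_1d(fun, breaks, n_quad=4):
    """Approximate the average of fun on each cell [breaks[i], breaks[i+1]]
    with Gauss-Legendre quadrature (nodes are given on [-1, 1])."""
    pts, wts = np.polynomial.legendre.leggauss(n_quad)
    avgs = []
    for a, b in zip(breaks[:-1], breaks[1:]):
        x = 0.5 * (b - a) * pts + 0.5 * (a + b)  # map nodes to [a, b]
        avgs.append(0.5 * np.dot(wts, fun(x)))   # weights sum to 2, hence the 1/2
    return np.array(avgs)

print(cell_averages_1d(np.sin, np.linspace(0.0, np.pi, 5)))
```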
diff --git a/src/struphy/pic/particles.py b/src/struphy/pic/particles.py
index 6c818b3ee..ef9cfaa6a 100644
--- a/src/struphy/pic/particles.py
+++ b/src/struphy/pic/particles.py
@@ -1,17 +1,12 @@
 import copy

-import cunumpy as xp
-
-from struphy.fields_background import equils
-from struphy.fields_background.base import FluidEquilibrium, FluidEquilibriumWithB
+from struphy.fields_background.base import FluidEquilibriumWithB
 from struphy.fields_background.projected_equils import ProjectedFluidEquilibriumWithB
 from struphy.geometry.base import Domain
-from struphy.geometry.utilities import TransformedPformComponent
-from struphy.initial.base import Perturbation
 from struphy.kinetic_background import maxwellians
-from struphy.kinetic_background.base import Maxwellian, SumKineticBackground
 from struphy.pic import utilities_kernels
 from struphy.pic.base import Particles
+from struphy.utils.arrays import xp as np


 class Particles6D(Particles):
@@ -28,8 +23,8 @@ class Particles6D(Particles):
     """

     @classmethod
-    def default_background(cls):
-        return maxwellians.Maxwellian3D()
+    def default_bckgr_params(cls):
+        return {"Maxwellian3D": {}}

     def __init__(
         self,
@@ -37,19 +32,17 @@ def __init__(
     ):
         kwargs["type"] = "full_f"

-        if "background" not in kwargs:
-            kwargs["background"] = self.default_background()
-        elif kwargs["background"] is None:
-            kwargs["background"] = self.default_background()
+        if "bckgr_params" not in kwargs:
+            kwargs["bckgr_params"] = self.default_bckgr_params()

         # default number of diagnostics and auxiliary columns
         self._n_cols_diagnostics = kwargs.pop("n_cols_diagn", 0)
         self._n_cols_aux = kwargs.pop("n_cols_aux", 5)
-
+        print(kwargs.keys())
         super().__init__(**kwargs)

         # call projected mhd equilibrium in case of CanonicalMaxwellian
-        if isinstance(kwargs["background"], maxwellians.CanonicalMaxwellian):
+        if "CanonicalMaxwellian" in kwargs["bckgr_params"]:
             assert isinstance(self.equil, FluidEquilibriumWithB), (
                 "CanonicalMaxwellian needs background with magnetic field."
             )
@@ -97,16 +90,16 @@ def svol(self, eta1, eta2, eta3, *v):
         """
         # load sampling density svol (normalized to 1 in logical space)
         maxw_params = {
-            "n": (1.0, None),
-            "u1": (self.loading_params.moments[0], None),
-            "u2": (self.loading_params.moments[1], None),
-            "u3": (self.loading_params.moments[2], None),
-            "vth1": (self.loading_params.moments[3], None),
-            "vth2": (self.loading_params.moments[4], None),
-            "vth3": (self.loading_params.moments[5], None),
+            "n": 1.0,
+            "u1": self.loading_params["moments"][0],
+            "u2": self.loading_params["moments"][1],
+            "u3": self.loading_params["moments"][2],
+            "vth1": self.loading_params["moments"][3],
+            "vth2": self.loading_params["moments"][4],
+            "vth3": self.loading_params["moments"][5],
         }

-        fun = maxwellians.Maxwellian3D(**maxw_params)
+        fun = maxwellians.Maxwellian3D(maxw_params=maxw_params)

         if self.spatial == "uniform":
             return fun(eta1, eta2, eta3, *v)
@@ -142,7 +135,7 @@ def s0(self, eta1, eta2, eta3, *v, flat_eval=False, remove_holes=True):
             The 0-form sampling density.
         -------
         """
-        assert self.domain, "self.domain must be set to call the sampling density 0-form."
+        assert self.domain, f"self.domain must be set to call the sampling density 0-form."

         return self.domain.transform(
             self.svol(eta1, eta2, eta3, *v),
@@ -219,8 +212,7 @@ def save_constants_of_motion(self):

         # send particles to the guiding center positions
         self.markers[~self.holes, self.first_pusher_idx : self.first_pusher_idx + 3] = self.markers[
-            ~self.holes,
-            slice_gc,
+            ~self.holes, slice_gc
         ]
         if self.mpi_comm is not None:
             self.mpi_sort_markers(alpha=1)
@@ -247,45 +239,35 @@ class DeltaFParticles6D(Particles6D):
     """

     @classmethod
-    def default_background(cls):
-        return maxwellians.Maxwellian3D()
+    def default_bckgr_params(cls):
+        return {"Maxwellian3D": {}}

     def __init__(
         self,
         **kwargs,
     ):
         kwargs["type"] = "delta_f"
-        if "weights_params" in kwargs:
-            kwargs["weights_params"].control_variate = False
+        kwargs["control_variate"] = False

         super().__init__(**kwargs)

     def _set_initial_condition(self):
-        # bp_copy = copy.deepcopy(self.bckgr_params)
-        # pp_copy = copy.deepcopy(self.pert_params)
-
-        # # Prepare delta-f perturbation parameters
-        # if pp_copy is not None:
-        #     for fi in bp_copy:
-        #         # Set background to zero (if "use_background_n" in perturbation params is set to false or not in keys)
-        #         if fi in pp_copy:
-        #             if "use_background_n" in pp_copy[fi]:
-        #                 if not pp_copy[fi]["use_background_n"]:
-        #                     bp_copy[fi]["n"] = 0.0
-        #             else:
-        #                 bp_copy[fi]["n"] = 0.0
-        #         else:
-        #             bp_copy[fi]["n"] = 0.0
-        self.set_n_to_zero(self.initial_condition)
-
-        super()._set_initial_condition()
-
-    def set_n_to_zero(self, background: Maxwellian | SumKineticBackground):
-        if isinstance(background, Maxwellian):
-            background.maxw_params["n"] = (0.0, background.maxw_params["n"][1])
-        else:
-            assert isinstance(background, SumKineticBackground)
-            self.set_n_to_zero(background._f1)
-            self.set_n_to_zero(background._f2)
+        bp_copy = copy.deepcopy(self.bckgr_params)
+        pp_copy = copy.deepcopy(self.pert_params)
+
+        # Prepare delta-f perturbation parameters
+        if pp_copy is not None:
+            for fi in bp_copy:
+                # Set background to zero (if "use_background_n" in perturbation params is set to false or not in keys)
+                if fi in pp_copy:
+                    if "use_background_n" in pp_copy[fi]:
+                        if not pp_copy[fi]["use_background_n"]:
+                            bp_copy[fi]["n"] = 0.0
+                    else:
+                        bp_copy[fi]["n"] = 0.0
+                else:
+                    bp_copy[fi]["n"] = 0.0
+
+        super()._set_initial_condition(bp_copy=bp_copy, pp_copy=pp_copy)


 class Particles5D(Particles):
@@ -320,20 +302,18 @@ class Particles5D(Particles):
     """

     @classmethod
-    def default_background(cls):
-        return maxwellians.GyroMaxwellian2D()
+    def default_bckgr_params(cls):
+        return {"GyroMaxwellian2D": {}}

     def __init__(
         self,
         projected_equil: ProjectedFluidEquilibriumWithB,
         **kwargs,
     ):
-        assert projected_equil is not None, "Particles5D needs a projected MHD equilibrium."
-
         kwargs["type"] = "full_f"

-        # if "bckgr_params" not in kwargs:
-        #     kwargs["bckgr_params"] = self.default_bckgr_params()
+        if "bckgr_params" not in kwargs:
+            kwargs["bckgr_params"] = self.default_bckgr_params()

         # default number of diagnostics and auxiliary columns
         self._n_cols_diagnostics = kwargs.pop("n_cols_diagn", 3)
@@ -353,7 +333,6 @@ def __init__(
         self._unit_b1_h = self.projected_equil.unit_b1
         self._derham = self.projected_equil.derham
-        self._tmp0 = self.derham.Vh["0"].zeros()
         self._tmp2 = self.derham.Vh["2"].zeros()

     @property
@@ -422,18 +401,14 @@ def svol(self, eta1, eta2, eta3, *v):
         # load sampling density svol (normalized to 1 in logical space)
         maxw_params = {
             "n": 1.0,
-            "u_para": self.loading_params.moments[0],
-            "u_perp": self.loading_params.moments[1],
-            "vth_para": self.loading_params.moments[2],
-            "vth_perp": self.loading_params.moments[3],
+            "u_para": self.loading_params["moments"][0],
+            "u_perp": self.loading_params["moments"][1],
+            "vth_para": self.loading_params["moments"][2],
+            "vth_perp": self.loading_params["moments"][3],
         }

         self._svol = maxwellians.GyroMaxwellian2D(
-            n=(1.0, None),
-            u_para=(self.loading_params.moments[0], None),
-            u_perp=(self.loading_params.moments[1], None),
-            vth_para=(self.loading_params.moments[2], None),
-            vth_perp=(self.loading_params.moments[3], None),
+            maxw_params=maxw_params,
             volume_form=True,
             equil=self._magn_bckgr,
         )
@@ -563,7 +538,7 @@ def save_constants_of_motion(self):
             self.absB0_h._data,
         )

-    def save_magnetic_energy(self, PBb):
+    def save_magnetic_energy(self, b2):
         r"""
         Calculate the magnetic field energy at each particle's position and assign it
         into markers[:, self.first_diagnostics_idx].
@@ -574,17 +549,22 @@ def save_magnetic_energy(self, b2):
             Finite element coefficients of the time-dependent magnetic field.
""" - E0T = self.derham.extraction_ops["0"].transpose() - PBbt = E0T.dot(PBb, out=self._tmp0) - PBbt.update_ghost_regions() + E2T = self.derham.extraction_ops["2"].transpose() + b2t = E2T.dot(b2, out=self._tmp2) + b2t.update_ghost_regions() - utilities_kernels.eval_magnetic_energy_PBb( + utilities_kernels.eval_magnetic_energy( self.markers, self.derham.args_derham, self.domain.args_domain, self.first_diagnostics_idx, self.absB0_h._data, - PBbt._data, + self.unit_b1_h[0]._data, + self.unit_b1_h[1]._data, + self.unit_b1_h[2]._data, + b2t[0]._data, + b2t[1]._data, + b2t[2]._data, ) def save_magnetic_background_energy(self): @@ -646,8 +626,8 @@ class Particles3D(Particles): """ @classmethod - def default_background(cls): - return maxwellians.ColdPlasma() + def default_bckgr_params(cls): + return {"ColdPlasma": {}} def __init__( self, @@ -655,10 +635,8 @@ def __init__( ): kwargs["type"] = "full_f" - if "background" not in kwargs: - kwargs["background"] = self.default_background() - elif kwargs["background"] is None: - kwargs["background"] = self.default_background() + if "bckgr_params" not in kwargs: + kwargs["bckgr_params"] = self.default_bckgr_params() # default number of diagnostics and auxiliary columns self._n_cols_diagnostics = kwargs.pop("n_cols_diagn", 0) @@ -771,8 +749,8 @@ class ParticlesSPH(Particles): """ @classmethod - def default_background(cls): - return equils.ConstantVelocity() + def default_bckgr_params(cls): + return {"ConstantVelocity": {}} def __init__( self, @@ -780,20 +758,14 @@ def __init__( ): kwargs["type"] = "sph" - if "background" not in kwargs: - bckgr = self.default_background() - bckgr.domain = kwargs["domain"] - kwargs["background"] = bckgr - elif kwargs["background"] is None: - bckgr = self.default_background() - bckgr.domain = kwargs["domain"] - kwargs["background"] = bckgr + if "bckgr_params" not in kwargs: + kwargs["bckgr_params"] = self.default_bckgr_params() if "boxes_per_dim" not in kwargs: - kwargs["boxes_per_dim"] = (1, 1, 1) + boxes_per_dim = (1, 1, 1) else: if kwargs["boxes_per_dim"] is None: - kwargs["boxes_per_dim"] = (1, 1, 1) + boxes_per_dim = (1, 1, 1) # TODO: maybe this needs a fix # else: @@ -889,3 +861,60 @@ def s0(self, eta1, eta2, eta3, *v, flat_eval=False, remove_holes=True): kind="3_to_0", remove_outside=remove_holes, ) + + def _set_initial_condition(self): + """Set a callable initial condition f_init as a 0-form (scalar), and u_init in Cartesian coordinates.""" + from struphy.feec.psydac_derham import transform_perturbation + from struphy.fields_background.base import FluidEquilibrium + + pp_copy = copy.deepcopy(self.pert_params) + + # Get the initialization function and pass the correct arguments + assert isinstance(self.f0, FluidEquilibrium) + self._u_init = self.f0.u_cart + + if pp_copy is not None: + if "n" in pp_copy: + for _type, _params in pp_copy["n"].items(): # only one perturbation is taken into account at the moment + _fun = transform_perturbation(_type, _params, "0", self.domain) + if "u1" in pp_copy: + for _type, _params in pp_copy[ + "u1" + ].items(): # only one perturbation is taken into account at the moment + _fun = transform_perturbation(_type, _params, "v", self.domain) + _fun_cart = lambda e1, e2, e3: self.domain.push(_fun, e1, e2, e3, kind="v") + self._u_init = lambda e1, e2, e3: self.f0.u_cart(e1, e2, e3)[0] + _fun_cart(e1, e2, e3) + # TODO: add other velocity components + else: + _fun = None + + def _f_init(*etas, flat_eval=False): + if len(etas) == 1: + if _fun is None: + out = self.f0.n0(etas[0]) + else: + out = 
+                    out = self.f0.n0(etas[0]) + _fun(*etas[0].T)
+            else:
+                assert len(etas) == 3
+                E1, E2, E3, is_sparse_meshgrid = Domain.prepare_eval_pts(
+                    etas[0],
+                    etas[1],
+                    etas[2],
+                    flat_eval=flat_eval,
+                )
+
+                out0 = self.f0.n0(E1, E2, E3)
+
+                if _fun is None:
+                    out = out0
+                else:
+                    out1 = _fun(E1, E2, E3)
+                    assert out0.shape == out1.shape
+                    out = out0 + out1
+
+            if flat_eval:
+                out = np.squeeze(out)
+
+            return out
+
+        self._f_init = _f_init
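The `_f_init` closure added here composes the equilibrium density with an optional perturbation; a stripped-down sketch of the same pattern, independent of struphy's `transform_perturbation` machinery (toy background and mode):

```python
import numpy as np

def make_f_init(n0, pert=None):
    """Return a callable initial condition n0 + pert on the unit cube."""
    def f_init(e1, e2, e3):
        out = n0(e1, e2, e3)
        if pert is not None:
            out = out + pert(e1, e2, e3)
        return out
    return f_init

n0 = lambda e1, e2, e3: np.ones_like(e1)                 # flat background
pert = lambda e1, e2, e3: 1e-3 * np.sin(2 * np.pi * e1)  # small perturbation
f_init = make_f_init(n0, pert)
print(f_init(np.linspace(0, 1, 5), 0.0, 0.0))
```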
n_stages=4 for RK4) for stage in range(self.n_stages): # start iteration (maxiter=1 for explicit schemes) - n_not_converged = xp.empty(1, dtype=int) + n_not_converged = np.empty(1, dtype=int) n_not_converged[0] = self.particles.n_mks_loc k = 0 if self.verbose and self.maxiter > 1: max_res = 1.0 print( - f"rank {rank}: {k =}, tol: {self._tol}, {n_not_converged[0] =}, {max_res =}", + f"rank {rank}: {k = }, tol: {self._tol}, {n_not_converged[0] = }, {max_res = }", ) if self.particles.mpi_comm is not None: self.particles.mpi_comm.Barrier() @@ -298,18 +301,18 @@ def __call__(self, dt: float): # compute number of non-converged particles (maxiter=1 for explicit schemes) if self.maxiter > 1: self._residuals[:] = markers[:, residual_idx] - max_res = xp.max(self._residuals) + max_res = np.max(self._residuals) if max_res < 0.0: max_res = None self._converged_loc[:] = self._residuals < self._tol self._not_converged_loc[:] = ~self._converged_loc - n_not_converged[0] = xp.count_nonzero( + n_not_converged[0] = np.count_nonzero( self._not_converged_loc, ) if self.verbose: print( - f"rank {rank}: {k =}, tol: {self._tol}, {n_not_converged[0] =}, {max_res =}", + f"rank {rank}: {k = }, tol: {self._tol}, {n_not_converged[0] = }, {max_res = }", ) if self.particles.mpi_comm is not None: self.particles.mpi_comm.Barrier() @@ -329,7 +332,7 @@ def __call__(self, dt: float): if self.maxiter > 1: rank = self.particles.mpi_rank print( - f"rank {rank}: {k =}, maxiter={self.maxiter} reached! tol: {self._tol}, {n_not_converged[0] =}, {max_res =}", + f"rank {rank}: {k = }, maxiter={self.maxiter} reached! tol: {self._tol}, {n_not_converged[0] = }, {max_res = }", ) # sort markers according to domain decomposition if self.mpi_sort == "each": diff --git a/src/struphy/pic/pushing/pusher_kernels.py b/src/struphy/pic/pushing/pusher_kernels.py index 47b6a71ba..429e1c722 100644 --- a/src/struphy/pic/pushing/pusher_kernels.py +++ b/src/struphy/pic/pushing/pusher_kernels.py @@ -1615,15 +1615,6 @@ def push_bxu_Hdiv_pauli( # -- removed omp: #$ omp end parallel -@stack_array( - "dfm", - "dfinv", - "dfinv_t", - "e", - "e_cart", - "GXu", - "v", -) def push_pc_GXu_full( dt: float, stage: int, @@ -1639,6 +1630,7 @@ def push_pc_GXu_full( GXu_31: "float[:,:,:]", GXu_32: "float[:,:,:]", GXu_33: "float[:,:,:]", + boundary_cut: "float", ): r"""Updates @@ -1679,6 +1671,10 @@ def push_pc_GXu_full( if markers[ip, 0] == -1.0: continue + # boundary cut + if markers[ip, 0] < boundary_cut or markers[ip, 0] > 1.0 - boundary_cut: + continue + eta1 = markers[ip, 0] eta2 = markers[ip, 1] eta3 = markers[ip, 2] @@ -1744,15 +1740,6 @@ def push_pc_GXu_full( markers[ip, 3:6] -= dt * e_cart / 2.0 -@stack_array( - "dfm", - "dfinv", - "dfinv_t", - "e", - "e_cart", - "GXu", - "v", -) def push_pc_GXu( dt: float, stage: int, @@ -1768,6 +1755,7 @@ def push_pc_GXu( GXu_31: "float[:,:,:]", GXu_32: "float[:,:,:]", GXu_33: "float[:,:,:]", + boundary_cut: "float", ): r"""Updates @@ -1795,6 +1783,7 @@ def push_pc_GXu( e = empty(3, dtype=float) e_cart = empty(3, dtype=float) GXu = empty((3, 3), dtype=float) + GXu_t = empty((3, 3), dtype=float) # particle velocity v = empty(3, dtype=float) @@ -1808,6 +1797,10 @@ def push_pc_GXu( if markers[ip, 0] == -1.0: continue + # boundary cut + if markers[ip, 0] < boundary_cut or markers[ip, 0] > 1.0 - boundary_cut: + continue + eta1 = markers[ip, 0] eta2 = markers[ip, 1] eta3 = markers[ip, 2] @@ -1946,7 +1939,7 @@ def push_eta_stage( @stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v", "k_u") -def 
push_pc_eta_stage_Hcurl( +def push_pc_eta_rk4_Hcurl_full( dt: float, stage: int, args_markers: "MarkerArguments", @@ -1955,10 +1948,6 @@ def push_pc_eta_stage_Hcurl( u_1: "float[:,:,:]", u_2: "float[:,:,:]", u_3: "float[:,:,:]", - use_perp_model: "bool", - a: "float[:]", - b: "float[:]", - c: "float[:]", ): r"""Fourth order Runge-Kutta solve of @@ -1971,6 +1960,14 @@ def push_pc_eta_stage_Hcurl( .. math:: \textnormal{vec}( \hat{\mathbf U}^{1}) = G^{-1}\hat{\mathbf U}^{1}\,,\qquad \textnormal{vec}( \hat{\mathbf U}^{2}) = \frac{\hat{\mathbf U}^{2}}{\sqrt g}\,. + + Parameters + ---------- + u_1, u_2, u_3: array[float] + 3d array of FE coeffs of U-field, either as 1-form or as 2-form. + + u_basis : int + U is 1-form (u_basis=1) or a 2-form (u_basis=2). """ # allocate metric coeffs @@ -1996,13 +1993,22 @@ def push_pc_eta_stage_Hcurl( first_init_idx = args_markers.first_init_idx first_free_idx = args_markers.first_free_idx - # get number of stages - n_stages = shape(b)[0] + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 - if stage == n_stages - 1: + # which stage + if stage == 3: last = 1.0 + cont = 0.0 + elif stage == 2: + last = 0.0 + cont = 2.0 else: last = 0.0 + cont = 1.0 for ip in range(n_markers): # only do something if particle is a "true" particle (i.e. not a hole) @@ -2047,9 +2053,6 @@ def push_pc_eta_stage_Hcurl( u, ) - if use_perp_model: - u[2] = 0.0 - # transform to vector field linalg_kernels.matrix_vector(ginv, u, k_u) @@ -2057,18 +2060,18 @@ def push_pc_eta_stage_Hcurl( k[:] = k_v + k_u # accum k - markers[ip, first_free_idx : first_free_idx + 3] += dt * b[stage] * k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 # update markers for the next stage markers[ip, 0:3] = ( markers[ip, first_init_idx : first_init_idx + 3] - + dt * k * a[stage] - + last * markers[ip, first_free_idx : first_free_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last ) @stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v", "k_u") -def push_pc_eta_stage_Hdiv( +def push_pc_eta_rk4_Hdiv_full( dt: float, stage: int, args_markers: "MarkerArguments", @@ -2077,10 +2080,6 @@ def push_pc_eta_stage_Hdiv( u_1: "float[:,:,:]", u_2: "float[:,:,:]", u_3: "float[:,:,:]", - use_perp_model: "bool", - a: "float[:]", - b: "float[:]", - c: "float[:]", ): r"""Fourth order Runge-Kutta solve of @@ -2093,6 +2092,14 @@ def push_pc_eta_stage_Hdiv( .. math:: \textnormal{vec}( \hat{\mathbf U}^{1}) = G^{-1}\hat{\mathbf U}^{1}\,,\qquad \textnormal{vec}( \hat{\mathbf U}^{2}) = \frac{\hat{\mathbf U}^{2}}{\sqrt g}\,. + + Parameters + ---------- + u_1, u_2, u_3: array[float] + 3d array of FE coeffs of U-field, either as 1-form or as 2-form. + + u_basis : int + U is 1-form (u_basis=1) or a 2-form (u_basis=2). """ # allocate metric coeffs @@ -2118,13 +2125,19 @@ def push_pc_eta_stage_Hdiv( first_init_idx = args_markers.first_init_idx first_free_idx = args_markers.first_free_idx - # get number of stages - n_stages = shape(b)[0] + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 - if stage == n_stages - 1: + # is it the last stage? + if stage == 3: last = 1.0 + cont = 0.0 else: last = 0.0 + cont = 1.0 for ip in range(n_markers): # only do something if particle is a "true" particle (i.e. 
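For reference, the hard-coded staging that replaces the generic `a`, `b`, `c` Butcher arrays in these `push_pc_eta_rk4_*` kernels encodes classic RK4: k-weights nk = (1, 2, 2, 1) accumulated as k*nk/6, and half-step factors cont = (1, 1, 2, 0). The Hcurl and H1vec variants carry the `elif stage == 2` branch setting `cont = 2.0`; the two Hdiv variants (just above and further below) fall through to `cont = 1.0` at stage 2, which the tableau does not, so that branch may have been dropped inadvertently. The new docstrings also list a `u_basis` parameter that no longer appears in the signatures. A minimal plain-Python sketch of the staging for a scalar ODE dy/dt = f(y), assuming RK4 is intended, checked against the textbook update:

    def rk4_staged(f, y0, dt):
        y, acc = y0, 0.0
        for stage in range(4):
            nk = 1.0 if stage in (0, 3) else 2.0   # k-weights 1, 2, 2, 1
            if stage == 3:
                last, cont = 1.0, 0.0              # final update from the accumulator
            elif stage == 2:
                last, cont = 0.0, 2.0              # full step dt * k3 (the branch in question)
            else:
                last, cont = 0.0, 1.0              # half steps dt/2 * k1, dt/2 * k2
            k = f(y)
            acc += k * nk / 6.0                    # "accum k" as in the kernels
            y = y0 + dt * k / 2 * cont + dt * acc * last
        return y

    f = lambda y: -2.0 * y
    dt, y0 = 0.1, 1.0
    k1 = f(y0); k2 = f(y0 + dt / 2 * k1); k3 = f(y0 + dt / 2 * k2); k4 = f(y0 + dt * k3)
    assert abs(rk4_staged(f, y0, dt) - (y0 + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0)) < 1e-13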
not a hole) @@ -2170,8 +2183,397 @@ def push_pc_eta_stage_Hdiv( u, ) - if use_perp_model: - u[2] = 0.0 + # transform to vector field + k_u[:] = u / det_df + + # sum contribs + k[:] = k_v + k_u + + # accum k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 + + # update markers for the next stage + markers[ip, 0:3] = ( + markers[ip, first_init_idx : first_init_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last + ) + + +@stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v") +def push_pc_eta_rk4_H1vec_full( + dt: float, + stage: int, + args_markers: "MarkerArguments", + args_domain: "DomainArguments", + args_derham: "DerhamArguments", + u_1: "float[:,:,:]", + u_2: "float[:,:,:]", + u_3: "float[:,:,:]", +): + r"""Fourth order Runge-Kutta solve of + + .. math:: + + \frac{\textnormal d \boldsymbol \eta_p(t)}{\textnormal d t} = DF^{-1}(\boldsymbol \eta_p(t)) \mathbf v + \textnormal{vec}( \hat{\mathbf U}^{1(2)}) + + for each marker :math:`p` in markers array, where :math:`\mathbf v` is constant and + + .. math:: + + \textnormal{vec}( \hat{\mathbf U}^{1}) = G^{-1}\hat{\mathbf U}^{1}\,,\qquad \textnormal{vec}( \hat{\mathbf U}^{2}) = \frac{\hat{\mathbf U}^{2}}{\sqrt g}\,. + + Parameters + ---------- + u_1, u_2, u_3 : array[float] + 3d array of FE coeffs of U-field, either as 1-form or as 2-form. + + u_basis : int + U is 1-form (u_basis=1) or a 2-form (u_basis=2). + """ + + # allocate metric coeffs + dfm = empty((3, 3), dtype=float) + dfinv = empty((3, 3), dtype=float) + dfinv_t = empty((3, 3), dtype=float) + ginv = empty((3, 3), dtype=float) + + # marker and velocity + v = empty(3, dtype=float) + + # U-fiels + u = empty(3, dtype=float) + + # intermediate stages in RK4 + k = empty(3, dtype=float) + k_v = empty(3, dtype=float) + + # get marker arguments + markers = args_markers.markers + n_markers = args_markers.n_markers + first_init_idx = args_markers.first_init_idx + first_free_idx = args_markers.first_free_idx + + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 + + # which stage + if stage == 3: + last = 1.0 + cont = 0.0 + elif stage == 2: + last = 0.0 + cont = 2.0 + else: + last = 0.0 + cont = 1.0 + + for ip in range(n_markers): + # only do something if particle is a "true" particle (i.e. 
not a hole) + if markers[ip, 0] == -1.0: + continue + + e1 = markers[ip, 0] + e2 = markers[ip, 1] + e3 = markers[ip, 2] + v[:] = markers[ip, 3:6] + + # ----------------- stage n in Runge-Kutta method ------------------- + # evaluate Jacobian, result in dfm + evaluation_kernels.df( + e1, + e2, + e3, + args_domain, + dfm, + ) + + # metric coeffs + linalg_kernels.matrix_inv(dfm, dfinv) + linalg_kernels.transpose(dfinv, dfinv_t) + linalg_kernels.matrix_matrix(dfinv, dfinv_t, ginv) + + # pull-back of velocity + linalg_kernels.matrix_vector(dfinv, v, k_v) + + # spline evaluation + span1, span2, span3 = get_spans(e1, e2, e3, args_derham) + + # U-field + eval_vectorfield_spline_mpi( + span1, + span2, + span3, + args_derham, + u_1, + u_2, + u_3, + u, + ) + + # sum contribs + k[:] = k_v + u + + # accum k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 + + # update markers for the next stage + markers[ip, 0:3] = ( + markers[ip, first_init_idx : first_init_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last + ) + + +@stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v", "k_u") +def push_pc_eta_rk4_Hcurl( + dt: float, + stage: int, + args_markers: "MarkerArguments", + args_domain: "DomainArguments", + args_derham: "DerhamArguments", + u_1: "float[:,:,:]", + u_2: "float[:,:,:]", + u_3: "float[:,:,:]", +): + r"""Fourth order Runge-Kutta solve of + + .. math:: + + \frac{\textnormal d \boldsymbol \eta_p(t)}{\textnormal d t} = DF^{-1}(\boldsymbol \eta_p(t)) \mathbf v + \textnormal{vec}( \hat{\mathbf U}^{1(2)}) + + for each marker :math:`p` in markers array, where :math:`\mathbf v` is constant and + + .. math:: + + \textnormal{vec}( \hat{\mathbf U}^{1}) = G^{-1}\hat{\mathbf U}^{1}\,,\qquad \textnormal{vec}( \hat{\mathbf U}^{2}) = \frac{\hat{\mathbf U}^{2}}{\sqrt g}\,. + + Parameters + ---------- + u_1, u_2, u_3 : array[float] + 3d array of FE coeffs of U-field, either as 1-form or as 2-form. + + u_basis : int + U is 1-form (u_basis=1) or a 2-form (u_basis=2). + """ + + # allocate metric coeffs + dfm = empty((3, 3), dtype=float) + dfinv = empty((3, 3), dtype=float) + dfinv_t = empty((3, 3), dtype=float) + ginv = empty((3, 3), dtype=float) + + # marker velocity + v = empty(3, dtype=float) + + # U-fiels + u = empty(3, dtype=float) + + # intermediate stages in RK4 + k = empty(3, dtype=float) + k_v = empty(3, dtype=float) + k_u = empty(3, dtype=float) + + # get marker arguments + markers = args_markers.markers + n_markers = args_markers.n_markers + first_init_idx = args_markers.first_init_idx + first_free_idx = args_markers.first_free_idx + + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 + + # which stage + if stage == 3: + last = 1.0 + cont = 0.0 + elif stage == 2: + last = 0.0 + cont = 2.0 + else: + last = 0.0 + cont = 1.0 + + for ip in range(n_markers): + # only do something if particle is a "true" particle (i.e. 
not a hole) + if markers[ip, 0] == -1.0: + continue + + e1 = markers[ip, 0] + e2 = markers[ip, 1] + e3 = markers[ip, 2] + v[:] = markers[ip, 3:6] + + # ----------------- stage n in Runge-Kutta method ------------------- + # evaluate Jacobian, result in dfm + evaluation_kernels.df( + e1, + e2, + e3, + args_domain, + dfm, + ) + + # metric coeffs + linalg_kernels.matrix_inv(dfm, dfinv) + linalg_kernels.transpose(dfinv, dfinv_t) + linalg_kernels.matrix_matrix(dfinv, dfinv_t, ginv) + + # pull-back of velocity + linalg_kernels.matrix_vector(dfinv, v, k_v) + + # spline evaluation + span1, span2, span3 = get_spans(e1, e2, e3, args_derham) + + # U-field + eval_1form_spline_mpi( + span1, + span2, + span3, + args_derham, + u_1, + u_2, + u_3, + u, + ) + u[2] = 0.0 + + # transform to vector field + linalg_kernels.matrix_vector(ginv, u, k_u) + + # sum contribs + k[:] = k_v + k_u + + # accum k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 + + # update markers for the next stage + markers[ip, 0:3] = ( + markers[ip, first_init_idx : first_init_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last + ) + + +@stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v", "k_u") +def push_pc_eta_rk4_Hdiv( + dt: float, + stage: int, + args_markers: "MarkerArguments", + args_domain: "DomainArguments", + args_derham: "DerhamArguments", + u_1: "float[:,:,:]", + u_2: "float[:,:,:]", + u_3: "float[:,:,:]", +): + r"""Fourth order Runge-Kutta solve of + + .. math:: + + \frac{\textnormal d \boldsymbol \eta_p(t)}{\textnormal d t} = DF^{-1}(\boldsymbol \eta_p(t)) \mathbf v + \textnormal{vec}( \hat{\mathbf U}^{1(2)}) + + for each marker :math:`p` in markers array, where :math:`\mathbf v` is constant and + + .. math:: + + \textnormal{vec}( \hat{\mathbf U}^{1}) = G^{-1}\hat{\mathbf U}^{1}\,,\qquad \textnormal{vec}( \hat{\mathbf U}^{2}) = \frac{\hat{\mathbf U}^{2}}{\sqrt g}\,. + + Parameters + ---------- + u_1, u_2, u_3 : array[float] + 3d array of FE coeffs of U-field, either as 1-form or as 2-form. + + u_basis : int + U is 1-form (u_basis=1) or a 2-form (u_basis=2). + """ + + # allocate metric coeffs + dfm = empty((3, 3), dtype=float) + dfinv = empty((3, 3), dtype=float) + dfinv_t = empty((3, 3), dtype=float) + ginv = empty((3, 3), dtype=float) + + # marker velocity + v = empty(3, dtype=float) + + # U-fiels + u = empty(3, dtype=float) + + # intermediate stages in RK4 + k = empty(3, dtype=float) + k_v = empty(3, dtype=float) + k_u = empty(3, dtype=float) + + # get marker arguments + markers = args_markers.markers + n_markers = args_markers.n_markers + first_init_idx = args_markers.first_init_idx + first_free_idx = args_markers.first_free_idx + + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 + + # is it the last stage? + if stage == 3: + last = 1.0 + cont = 0.0 + else: + last = 0.0 + cont = 1.0 + + for ip in range(n_markers): + # only do something if particle is a "true" particle (i.e. 
not a hole) + if markers[ip, 0] == -1.0: + continue + + e1 = markers[ip, 0] + e2 = markers[ip, 1] + e3 = markers[ip, 2] + v[:] = markers[ip, 3:6] + + # ----------------- stage n in Runge-Kutta method ------------------- + # evaluate Jacobian, result in dfm + evaluation_kernels.df( + e1, + e2, + e3, + args_domain, + dfm, + ) + + # metric coeffs + det_df = linalg_kernels.det(dfm) + linalg_kernels.matrix_inv(dfm, dfinv) + linalg_kernels.transpose(dfinv, dfinv_t) + linalg_kernels.matrix_matrix(dfinv, dfinv_t, ginv) + + # pull-back of velocity + linalg_kernels.matrix_vector(dfinv, v, k_v) + + # spline evaluation + span1, span2, span3 = get_spans(e1, e2, e3, args_derham) + + # U-field + eval_2form_spline_mpi( + span1, + span2, + span3, + args_derham, + u_1, + u_2, + u_3, + u, + ) + u[2] = 0.0 # transform to vector field k_u[:] = u / det_df @@ -2180,18 +2582,18 @@ def push_pc_eta_stage_Hdiv( k[:] = k_v + k_u # accum k - markers[ip, first_free_idx : first_free_idx + 3] += dt * b[stage] * k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 # update markers for the next stage markers[ip, 0:3] = ( markers[ip, first_init_idx : first_init_idx + 3] - + dt * k * a[stage] - + last * markers[ip, first_free_idx : first_free_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last ) @stack_array("dfm", "dfinv", "dfinv_t", "ginv", "v", "u", "k", "k_v") -def push_pc_eta_stage_H1vec( +def push_pc_eta_rk4_H1vec( dt: float, stage: int, args_markers: "MarkerArguments", @@ -2200,10 +2602,6 @@ def push_pc_eta_stage_H1vec( u_1: "float[:,:,:]", u_2: "float[:,:,:]", u_3: "float[:,:,:]", - use_perp_model: "bool", - a: "float[:]", - b: "float[:]", - c: "float[:]", ): r"""Fourth order Runge-Kutta solve of @@ -2248,13 +2646,22 @@ def push_pc_eta_stage_H1vec( first_init_idx = args_markers.first_init_idx first_free_idx = args_markers.first_free_idx - # get number of stages - n_stages = shape(b)[0] + # assign factor of k for each stage + if stage == 0 or stage == 3: + nk = 1.0 + else: + nk = 2.0 - if stage == n_stages - 1: + # which stage + if stage == 3: last = 1.0 + cont = 0.0 + elif stage == 2: + last = 0.0 + cont = 2.0 else: last = 0.0 + cont = 1.0 for ip in range(n_markers): # only do something if particle is a "true" particle (i.e. not a hole) @@ -2298,21 +2705,19 @@ def push_pc_eta_stage_H1vec( u_3, u, ) - - if use_perp_model: - u[2] = 0.0 + u[2] = 0.0 # sum contribs k[:] = k_v + u # accum k - markers[ip, first_free_idx : first_free_idx + 3] += dt * b[stage] * k + markers[ip, first_free_idx : first_free_idx + 3] += k * nk / 6.0 # update markers for the next stage markers[ip, 0:3] = ( markers[ip, first_init_idx : first_init_idx + 3] - + dt * k * a[stage] - + last * markers[ip, first_free_idx : first_free_idx + 3] + + dt * k / 2 * cont + + dt * markers[ip, first_free_idx : first_free_idx + 3] * last ) @@ -2630,7 +3035,7 @@ def push_v_sph_pressure( h1, h2, h3 : float Kernel width in respective dimension. - gravity: xp.ndarray + gravity: np.ndarray Constant gravitational force as 3-vector. """ # allocate arrays @@ -2861,7 +3266,7 @@ def push_v_sph_pressure_ideal_gas( h1, h2, h3 : float Kernel width in respective dimension. - gravity: xp.ndarray + gravity: np.ndarray Constant gravitational force as 3-vector. """ # allocate arrays @@ -3093,7 +3498,7 @@ def push_v_viscosity( h1, h2, h3 : float Kernel width in respective dimension. - gravity: xp.ndarray + gravity: np.ndarray Constant gravitational force as 3-vector. 
""" # allocate arrays diff --git a/src/struphy/pic/pushing/pusher_kernels_gc.py b/src/struphy/pic/pushing/pusher_kernels_gc.py index 5dfee707b..0b6c9b3c7 100644 --- a/src/struphy/pic/pushing/pusher_kernels_gc.py +++ b/src/struphy/pic/pushing/pusher_kernels_gc.py @@ -1896,7 +1896,7 @@ def push_gc_cc_J1_H1vec( ) # b_star; in H1vec - b_star[:] = b + curl_norm_b * v * epsilon + b_star[:] = (b + curl_norm_b * v * epsilon) / det_df # calculate abs_b_star_para abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) @@ -1905,7 +1905,7 @@ def push_gc_cc_J1_H1vec( linalg_kernels.cross(b, u, e) # curl_norm_b dot electric field - temp = linalg_kernels.scalar_dot(e, curl_norm_b) + temp = linalg_kernels.scalar_dot(e, curl_norm_b) / det_df markers[ip, 3] += temp / abs_b_star_para * v * dt @@ -2077,6 +2077,7 @@ def push_gc_cc_J1_Hdiv( u1: "float[:,:,:]", u2: "float[:,:,:]", u3: "float[:,:,:]", + boundary_cut: float, ): r"""Velocity update step for the `CurrentCoupling5DCurlb `_ @@ -2104,6 +2105,8 @@ def push_gc_cc_J1_Hdiv( markers = args_markers.markers n_markers = args_markers.n_markers + # -- removed omp: #$ omp parallel private(ip, boundary_cut, eta1, eta2, eta3, v, det_df, dfm, span1, span2, span3, b, u, e, curl_norm_b, norm_b1, b_star, temp, abs_b_star_para) + # -- removed omp: #$ omp for for ip in range(n_markers): # only do something if particle is a "true" particle (i.e. not a hole) if markers[ip, 0] == -1.0: @@ -2114,6 +2117,9 @@ def push_gc_cc_J1_Hdiv( eta3 = markers[ip, 2] v = markers[ip, 3] + if eta1 < boundary_cut or eta1 > 1.0 - boundary_cut: + continue + # evaluate Jacobian, result in dfm evaluation_kernels.df( eta1, @@ -2177,10 +2183,10 @@ def push_gc_cc_J1_Hdiv( curl_norm_b, ) - # b_star; 2form - b_star[:] = b + curl_norm_b * v * epsilon + # b_star; 2form in H1vec + b_star[:] = (b + curl_norm_b * v * epsilon) / det_df - # calculate 3form abs_b_star_para + # calculate abs_b_star_para abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) # transform u into H1vec @@ -2190,10 +2196,12 @@ def push_gc_cc_J1_Hdiv( linalg_kernels.cross(b, u, e) # curl_norm_b dot electric field - temp = linalg_kernels.scalar_dot(e, curl_norm_b) + temp = linalg_kernels.scalar_dot(e, curl_norm_b) / det_df markers[ip, 3] += temp / abs_b_star_para * v * dt + # -- removed omp: #$ omp end parallel + @stack_array( "dfm", @@ -2204,11 +2212,13 @@ def push_gc_cc_J1_Hdiv( "u", "bb", "b_star", - "norm_b", + "norm_b1", + "norm_b2", "curl_norm_b", - "tmp", + "tmp1", + "tmp2", "b_prod", - "norm_b_prod", + "norm_b2_prod", ) def push_gc_cc_J2_stage_H1vec( dt: float, @@ -2223,6 +2233,9 @@ def push_gc_cc_J2_stage_H1vec( norm_b11: "float[:,:,:]", norm_b12: "float[:,:,:]", norm_b13: "float[:,:,:]", + norm_b21: "float[:,:,:]", + norm_b22: "float[:,:,:]", + norm_b23: "float[:,:,:]", curl_norm_b1: "float[:,:,:]", curl_norm_b2: "float[:,:,:]", curl_norm_b3: "float[:,:,:]", @@ -2251,14 +2264,16 @@ def push_gc_cc_J2_stage_H1vec( g_inv = empty((3, 3), dtype=float) # containers for fields - tmp = empty((3, 3), dtype=float) + tmp1 = empty((3, 3), dtype=float) + tmp2 = empty((3, 3), dtype=float) b_prod = zeros((3, 3), dtype=float) - norm_b_prod = empty((3, 3), dtype=float) + norm_b2_prod = empty((3, 3), dtype=float) e = empty(3, dtype=float) u = empty(3, dtype=float) bb = empty(3, dtype=float) b_star = empty(3, dtype=float) norm_b1 = empty(3, dtype=float) + norm_b2 = empty(3, dtype=float) curl_norm_b = empty(3, dtype=float) # get marker arguments @@ -2339,6 +2354,18 @@ def push_gc_cc_J2_stage_H1vec( norm_b1, ) + # norm_b; 2form + 
eval_2form_spline_mpi( + span1, + span2, + span3, + args_derham, + norm_b21, + norm_b22, + norm_b23, + norm_b2, + ) + # curl_norm_b; 2form eval_2form_spline_mpi( span1, @@ -2359,21 +2386,24 @@ def push_gc_cc_J2_stage_H1vec( b_prod[2, 0] = -bb[1] b_prod[2, 1] = +bb[0] - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] + norm_b2_prod[0, 1] = -norm_b2[2] + norm_b2_prod[0, 2] = +norm_b2[1] + norm_b2_prod[1, 0] = +norm_b2[2] + norm_b2_prod[1, 2] = -norm_b2[0] + norm_b2_prod[2, 0] = -norm_b2[1] + norm_b2_prod[2, 1] = +norm_b2[0] # b_star; 2form in H1vec - b_star[:] = bb + curl_norm_b * v * epsilon + b_star[:] = (bb + curl_norm_b * v * epsilon) / det_df - # calculate 3form abs_b_star_para + # calculate abs_b_star_para abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) - linalg_kernels.matrix_matrix(norm_b_prod, b_prod, tmp) - linalg_kernels.matrix_vector(tmp, u, e) + linalg_kernels.matrix_matrix(g_inv, norm_b2_prod, tmp1) + linalg_kernels.matrix_matrix(tmp1, g_inv, tmp2) + linalg_kernels.matrix_matrix(tmp2, b_prod, tmp1) + + linalg_kernels.matrix_vector(tmp1, u, e) e /= abs_b_star_para @@ -2398,10 +2428,12 @@ def push_gc_cc_J2_stage_H1vec( "bb", "b_star", "norm_b1", + "norm_b2", "curl_norm_b", - "tmp", + "tmp1", + "tmp2", "b_prod", - "norm_b_prod", + "norm_b2_prod", ) def push_gc_cc_J2_stage_Hdiv( dt: float, @@ -2416,6 +2448,9 @@ def push_gc_cc_J2_stage_Hdiv( norm_b11: "float[:,:,:]", norm_b12: "float[:,:,:]", norm_b13: "float[:,:,:]", + norm_b21: "float[:,:,:]", + norm_b22: "float[:,:,:]", + norm_b23: "float[:,:,:]", curl_norm_b1: "float[:,:,:]", curl_norm_b2: "float[:,:,:]", curl_norm_b3: "float[:,:,:]", @@ -2425,6 +2460,7 @@ def push_gc_cc_J2_stage_Hdiv( a: "float[:]", b: "float[:]", c: "float[:]", + boundary_cut: float, ): r"""Single stage of a s-stage explicit pushing step for the `CurrentCoupling5DGradB `_ @@ -2444,14 +2480,16 @@ def push_gc_cc_J2_stage_Hdiv( g_inv = empty((3, 3), dtype=float) # containers for fields - tmp = zeros((3, 3), dtype=float) + tmp1 = zeros((3, 3), dtype=float) + tmp2 = zeros((3, 3), dtype=float) b_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) + norm_b2_prod = zeros((3, 3), dtype=float) e = empty(3, dtype=float) u = empty(3, dtype=float) bb = empty(3, dtype=float) b_star = empty(3, dtype=float) norm_b1 = empty(3, dtype=float) + norm_b2 = empty(3, dtype=float) curl_norm_b = empty(3, dtype=float) # get marker arguments @@ -2469,6 +2507,8 @@ def push_gc_cc_J2_stage_Hdiv( else: last = 0.0 + # -- removed omp: #$ omp parallel firstprivate(b_prod, norm_b2_prod) private(ip, boundary_cut, eta1, eta2, eta3, v, det_df, dfm, df_inv, df_inv_t, g_inv, span1, span2, span3, bb, u, e, curl_norm_b, norm_b1, norm_b2, b_star, tmp1, tmp2, abs_b_star_para) + # -- removed omp: #$ omp for for ip in range(n_markers): # check if marker is a hole if markers[ip, first_init_idx] == -1.0: @@ -2479,6 +2519,9 @@ def push_gc_cc_J2_stage_Hdiv( eta3 = markers[ip, 2] v = markers[ip, 3] + if eta1 < boundary_cut or eta2 > 1.0 - boundary_cut: + continue + # evaluate Jacobian, result in dfm evaluation_kernels.df( eta1, @@ -2533,178 +2576,16 @@ def push_gc_cc_J2_stage_Hdiv( norm_b1, ) - # curl_norm_b; 2form + # norm_b; 2form eval_2form_spline_mpi( span1, span2, span3, args_derham, - curl_norm_b1, - curl_norm_b2, - curl_norm_b3, - curl_norm_b, - ) - - # operator bx() as matrix - b_prod[0, 1] = -bb[2] - b_prod[0, 2] 
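Two notes on the `push_gc_cc_J2_stage_*` hunks here. First, the Hdiv kernel's new boundary cut tests `eta1 < boundary_cut or eta2 > 1.0 - boundary_cut`, while the J1 kernel above compares `eta1` on both sides, so the `eta2` is presumably a typo for `eta1`. Second, `b_prod` and `norm_b2_prod` are the matrix forms of the cross products with :math:`\hat{\mathbf B}` and :math:`\hat{\mathbf b}_2`, so the rewritten chain evaluates :math:`\mathbf e = G^{-1}(\hat{\mathbf b}_2\times)\,G^{-1}(\hat{\mathbf B}\times)\,\mathbf u`. A small NumPy check of that reading, with the identity metric assumed for simplicity:

    import numpy as np

    def cross_matrix(b):
        # the b_prod / norm_b2_prod assembly: cross_matrix(b) @ u == np.cross(b, u)
        return np.array([[0.0, -b[2], b[1]],
                         [b[2], 0.0, -b[0]],
                         [-b[1], b[0], 0.0]])

    rng = np.random.default_rng(0)
    b, nb2, u = rng.random(3), rng.random(3), rng.random(3)
    assert np.allclose(cross_matrix(b) @ u, np.cross(b, u))

    g_inv = np.eye(3)  # identity metric (Cartesian mapping), assumed for the check
    e = g_inv @ cross_matrix(nb2) @ g_inv @ cross_matrix(b) @ u
    assert np.allclose(e, np.cross(nb2, np.cross(b, u)))

The accompanying division of `b_star` and of the `curl_norm_b` contraction by `det_df` follows the convention quoted in the pusher docstrings, :math:`\textnormal{vec}(\hat{\mathbf U}^2) = \hat{\mathbf U}^2/\sqrt g`, turning 2-form components into vector-field components.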
= +bb[1] - b_prod[1, 0] = +bb[2] - b_prod[1, 2] = -bb[0] - b_prod[2, 0] = -bb[1] - b_prod[2, 1] = +bb[0] - - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] - - # b_star; 2form - b_star[:] = bb + curl_norm_b * v * epsilon - - # calculate abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) - - linalg_kernels.matrix_matrix(norm_b_prod, b_prod, tmp) - linalg_kernels.matrix_vector(tmp, u, e) - - e /= abs_b_star_para - e /= det_df - - # accumulation for last stage - markers[ip, first_free_idx : first_free_idx + 3] -= dt * b[stage] * e - - # update positions for intermediate stages or last stage - markers[ip, 0:3] = ( - markers[ip, first_init_idx : first_init_idx + 3] - - dt * a[stage] * e - + last * markers[ip, first_free_idx : first_free_idx + 3] - ) - - -@stack_array( - "dfm", - "df_inv", - "df_inv_t", - "g_inv", - "e", - "u", - "bb", - "b_star", - "norm_b1", - "curl_norm_b", - "tmp1", - "b_prod", - "norm_b_prod", -) -def push_gc_cc_J2_dg_init_Hdiv( - dt: float, - args_markers: "MarkerArguments", - args_domain: "DomainArguments", - args_derham: "DerhamArguments", - epsilon: float, - b1: "float[:,:,:]", - b2: "float[:,:,:]", - b3: "float[:,:,:]", - norm_b11: "float[:,:,:]", - norm_b12: "float[:,:,:]", - norm_b13: "float[:,:,:]", - curl_norm_b1: "float[:,:,:]", - curl_norm_b2: "float[:,:,:]", - curl_norm_b3: "float[:,:,:]", - u1: "float[:,:,:]", - u2: "float[:,:,:]", - u3: "float[:,:,:]", -): - r"""TODO""" - - # allocate metric coeffs - dfm = empty((3, 3), dtype=float) - df_inv = empty((3, 3), dtype=float) - df_inv_t = empty((3, 3), dtype=float) - g_inv = empty((3, 3), dtype=float) - - # containers for fields - tmp1 = zeros((3, 3), dtype=float) - b_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) - e = empty(3, dtype=float) - u = empty(3, dtype=float) - bb = empty(3, dtype=float) - b_star = empty(3, dtype=float) - norm_b1 = empty(3, dtype=float) - curl_norm_b = empty(3, dtype=float) - - # get marker arguments - markers = args_markers.markers - n_markers = args_markers.n_markers - mu_idx = args_markers.mu_idx - first_init_idx = args_markers.first_init_idx - first_free_idx = args_markers.first_free_idx - - for ip in range(n_markers): - # check if marker is a hole - if markers[ip, first_init_idx] == -1.0: - continue - - eta1 = markers[ip, 0] - eta2 = markers[ip, 1] - eta3 = markers[ip, 2] - v = markers[ip, 3] - - # evaluate Jacobian, result in dfm - evaluation_kernels.df( - eta1, - eta2, - eta3, - args_domain, - dfm, - ) - - # metric coeffs - det_df = linalg_kernels.det(dfm) - linalg_kernels.matrix_inv_with_det(dfm, det_df, df_inv) - linalg_kernels.transpose(df_inv, df_inv_t) - linalg_kernels.matrix_matrix(df_inv, df_inv_t, g_inv) - - # spline evaluation - span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) - - # b; 2form - eval_2form_spline_mpi( - span1, - span2, - span3, - args_derham, - b1, - b2, - b3, - bb, - ) - - # u; 2form - eval_2form_spline_mpi( - span1, - span2, - span3, - args_derham, - u1, - u2, - u3, - u, - ) - - # norm_b1; 1form - eval_1form_spline_mpi( - span1, - span2, - span3, - args_derham, - norm_b11, - norm_b12, - norm_b13, - norm_b1, + norm_b21, + norm_b22, + norm_b23, + norm_b2, ) # curl_norm_b; 2form @@ -2727,222 +2608,36 @@ def push_gc_cc_J2_dg_init_Hdiv( b_prod[2, 0] = -bb[1] b_prod[2, 1] = +bb[0] - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = 
+norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] + norm_b2_prod[0, 1] = -norm_b2[2] + norm_b2_prod[0, 2] = +norm_b2[1] + norm_b2_prod[1, 0] = +norm_b2[2] + norm_b2_prod[1, 2] = -norm_b2[0] + norm_b2_prod[2, 0] = -norm_b2[1] + norm_b2_prod[2, 1] = +norm_b2[0] - # b_star; 2form - b_star[:] = bb + curl_norm_b * v * epsilon + # b_star; 2form in H1vec + b_star[:] = (bb + curl_norm_b * v * epsilon) / det_df - # calculate 3form abs_b_star_para + # calculate abs_b_star_para abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) - linalg_kernels.matrix_matrix(norm_b_prod, b_prod, tmp1) + linalg_kernels.matrix_matrix(g_inv, norm_b2_prod, tmp1) + linalg_kernels.matrix_matrix(tmp1, g_inv, tmp2) + linalg_kernels.matrix_matrix(tmp2, b_prod, tmp1) + linalg_kernels.matrix_vector(tmp1, u, e) e /= abs_b_star_para e /= det_df - markers[ip, 0:3] -= dt * e - - -@stack_array( - "dfm", - "df_inv", - "df_inv_t", - "g_inv", - "e", - "u", - "ud", - "bb", - "b_star", - "norm_b1", - "curl_norm_b", - "tmp1", - "tmp2", - "b_prod", - "norm_b_prod", - "eta_old", - "eta_mid", -) -def push_gc_cc_J2_dg_Hdiv( - dt: float, - args_markers: "MarkerArguments", - args_domain: "DomainArguments", - args_derham: "DerhamArguments", - epsilon: float, - b1: "float[:,:,:]", - b2: "float[:,:,:]", - b3: "float[:,:,:]", - norm_b11: "float[:,:,:]", - norm_b12: "float[:,:,:]", - norm_b13: "float[:,:,:]", - curl_norm_b1: "float[:,:,:]", - curl_norm_b2: "float[:,:,:]", - curl_norm_b3: "float[:,:,:]", - u1: "float[:,:,:]", - u2: "float[:,:,:]", - u3: "float[:,:,:]", - ud1: "float[:,:,:]", - ud2: "float[:,:,:]", - ud3: "float[:,:,:]", - const: float, - alpha: float, -): - r"""TODO""" - - # allocate metric coeffs - dfm = empty((3, 3), dtype=float) - df_inv = empty((3, 3), dtype=float) - df_inv_t = empty((3, 3), dtype=float) - g_inv = empty((3, 3), dtype=float) - - # containers for fields - tmp1 = zeros((3, 3), dtype=float) - tmp2 = zeros(3, dtype=float) - b_prod = zeros((3, 3), dtype=float) - norm_b_prod = zeros((3, 3), dtype=float) - e = empty(3, dtype=float) - u = empty(3, dtype=float) - ud = empty(3, dtype=float) - bb = empty(3, dtype=float) - b_star = empty(3, dtype=float) - norm_b1 = empty(3, dtype=float) - curl_norm_b = empty(3, dtype=float) - eta_old = empty(3, dtype=float) - eta_mid = empty(3, dtype=float) - - # get marker arguments - markers = args_markers.markers - n_markers = args_markers.n_markers - mu_idx = args_markers.mu_idx - first_init_idx = args_markers.first_init_idx - first_free_idx = args_markers.first_free_idx - - for ip in range(n_markers): - # check if marker is a hole - if markers[ip, 0] == -1.0: - continue - - # marker positions, mid point - eta_old[:] = markers[ip, 0:3] - eta_mid[:] = (markers[ip, 0:3] + markers[ip, first_init_idx : first_init_idx + 3]) / 2.0 - eta_mid[:] = mod(eta_mid[:], 1.0) - - v = markers[ip, 3] - - # evaluate Jacobian, result in dfm - evaluation_kernels.df( - eta_mid[0], - eta_mid[1], - eta_mid[2], - args_domain, - dfm, - ) - - # metric coeffs - det_df = linalg_kernels.det(dfm) - linalg_kernels.matrix_inv_with_det(dfm, det_df, df_inv) - linalg_kernels.transpose(df_inv, df_inv_t) - linalg_kernels.matrix_matrix(df_inv, df_inv_t, g_inv) - - # spline evaluation - span1, span2, span3 = get_spans(eta_mid[0], eta_mid[1], eta_mid[2], args_derham) - - # b; 2form - eval_2form_spline_mpi( - span1, - span2, - span3, - args_derham, - b1, - b2, - b3, - bb, - ) - - # u; 2form - eval_2form_spline_mpi( - span1, 
- span2, - span3, - args_derham, - u1, - u2, - u3, - u, - ) - - # ud; 2form - eval_2form_spline_mpi( - span1, - span2, - span3, - args_derham, - ud1, - ud2, - ud3, - ud, - ) - - # norm_b1; 1form - eval_1form_spline_mpi( - span1, - span2, - span3, - args_derham, - norm_b11, - norm_b12, - norm_b13, - norm_b1, - ) + # accumulation for last stage + markers[ip, first_free_idx : first_free_idx + 3] -= dt * b[stage] * e - # curl_norm_b; 2form - eval_2form_spline_mpi( - span1, - span2, - span3, - args_derham, - curl_norm_b1, - curl_norm_b2, - curl_norm_b3, - curl_norm_b, + # update positions for intermediate stages or last stage + markers[ip, 0:3] = ( + markers[ip, first_init_idx : first_init_idx + 3] + - dt * a[stage] * e + + last * markers[ip, first_free_idx : first_free_idx + 3] ) - # operator bx() as matrix - b_prod[0, 1] = -bb[2] - b_prod[0, 2] = +bb[1] - b_prod[1, 0] = +bb[2] - b_prod[1, 2] = -bb[0] - b_prod[2, 0] = -bb[1] - b_prod[2, 1] = +bb[0] - - norm_b_prod[0, 1] = -norm_b1[2] - norm_b_prod[0, 2] = +norm_b1[1] - norm_b_prod[1, 0] = +norm_b1[2] - norm_b_prod[1, 2] = -norm_b1[0] - norm_b_prod[2, 0] = -norm_b1[1] - norm_b_prod[2, 1] = +norm_b1[0] - - # b_star; 2form - b_star[:] = bb + curl_norm_b * v * epsilon - - # calculate 3form abs_b_star_para - abs_b_star_para = linalg_kernels.scalar_dot(norm_b1, b_star) - - linalg_kernels.matrix_matrix(norm_b_prod, b_prod, tmp1) - linalg_kernels.matrix_vector(tmp1, u, e) - linalg_kernels.matrix_vector(tmp1, ud, tmp2) - tmp2 *= const - - e += tmp2 - - e /= abs_b_star_para - e /= det_df - - markers[ip, 0:3] = markers[ip, first_init_idx : first_init_idx + 3] - dt * e - markers[ip, 0:3] *= alpha - markers[ip, 0:3] += eta_old * (1.0 - alpha) + # -- removed omp: #$ omp end parallel diff --git a/src/struphy/pic/sampling_kernels.py b/src/struphy/pic/sampling_kernels.py index ce68d5aff..821363a97 100644 --- a/src/struphy/pic/sampling_kernels.py +++ b/src/struphy/pic/sampling_kernels.py @@ -93,13 +93,13 @@ def tile_int_kernel( Parameters ---------- - fun: xp.ndarray + fun: np.ndarray The integrand evaluated at the quadrature points (meshgrid). - x_wts, y_wts, z_wts: xp.ndarray + x_wts, y_wts, z_wts: np.ndarray Quadrature weights for tile integral. - out: xp.ndarray + out: np.ndarray The result holding all tile integrals in one sorting box.""" _shp = shape(out) diff --git a/src/struphy/pic/sobol_seq.py b/src/struphy/pic/sobol_seq.py index f4c01347a..ff073b1b3 100644 --- a/src/struphy/pic/sobol_seq.py +++ b/src/struphy/pic/sobol_seq.py @@ -17,9 +17,10 @@ from __future__ import division -import cunumpy as xp from scipy.stats import norm +from struphy.utils.arrays import xp as np + __all__ = ["i4_bit_hi1", "i4_bit_lo0", "i4_sobol_generate", "i4_sobol", "i4_uniform", "prime_ge", "is_prime"] @@ -59,7 +60,7 @@ def i4_bit_hi1(n): Output, integer BIT, the number of bits base 2. """ - i = xp.floor(n) + i = np.floor(n) bit = 0 while i > 0: bit += 1 @@ -104,7 +105,7 @@ def i4_bit_lo0(n): Output, integer BIT, the position of the low 1 bit. """ bit = 1 - i = xp.floor(n) + i = np.floor(n) while i != 2 * (i // 2): bit += 1 i //= 2 @@ -122,7 +123,7 @@ def i4_sobol_generate(dim_num, n, skip=1): Output, real R(M,N), the points. """ - r = xp.full((n, dim_num), xp.nan) + r = np.full((n, dim_num), np.nan) for j in range(n): seed = j + skip r[j, 0:dim_num], next_seed = i4_sobol(dim_num, seed) @@ -221,8 +222,8 @@ def i4_sobol(dim_num, seed): seed_save = -1 # Initialize (part of) V. 
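A quick sanity check of the two bit helpers defined above in this module (bit positions are 1-based):

    # 22 = 10110 in binary
    assert i4_bit_hi1(22) == 5   # five bits up to and including the leading 1
    assert i4_bit_lo0(22) == 1   # the least significant bit is already 0
    assert i4_bit_lo0(3) == 3    # 3 = 11, the first 0 sits at position 3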
- v = xp.zeros((dim_max, log_max)) - v[0:40, 0] = xp.transpose( + v = np.zeros((dim_max, log_max)) + v[0:40, 0] = np.transpose( [ 1, 1, @@ -264,10 +265,10 @@ def i4_sobol(dim_num, seed): 1, 1, 1, - ], + ] ) - v[2:40, 1] = xp.transpose( + v[2:40, 1] = np.transpose( [ 1, 3, @@ -307,10 +308,10 @@ def i4_sobol(dim_num, seed): 3, 1, 3, - ], + ] ) - v[3:40, 2] = xp.transpose( + v[3:40, 2] = np.transpose( [ 7, 5, @@ -349,10 +350,10 @@ def i4_sobol(dim_num, seed): 1, 3, 3, - ], + ] ) - v[5:40, 3] = xp.transpose( + v[5:40, 3] = np.transpose( [ 1, 7, @@ -389,10 +390,10 @@ def i4_sobol(dim_num, seed): 1, 7, 9, - ], + ] ) - v[7:40, 4] = xp.transpose( + v[7:40, 4] = np.transpose( [ 9, 3, @@ -427,18 +428,18 @@ def i4_sobol(dim_num, seed): 9, 31, 9, - ], + ] ) - v[13:40, 5] = xp.transpose( - [37, 33, 7, 5, 11, 39, 63, 27, 17, 15, 23, 29, 3, 21, 13, 31, 25, 9, 49, 33, 19, 29, 11, 19, 27, 15, 25], + v[13:40, 5] = np.transpose( + [37, 33, 7, 5, 11, 39, 63, 27, 17, 15, 23, 29, 3, 21, 13, 31, 25, 9, 49, 33, 19, 29, 11, 19, 27, 15, 25] ) - v[19:40, 6] = xp.transpose( - [13, 33, 115, 41, 79, 17, 29, 119, 75, 73, 105, 7, 59, 65, 21, 3, 113, 61, 89, 45, 107], + v[19:40, 6] = np.transpose( + [13, 33, 115, 41, 79, 17, 29, 119, 75, 73, 105, 7, 59, 65, 21, 3, 113, 61, 89, 45, 107] ) - v[37:40, 7] = xp.transpose([7, 23, 39]) + v[37:40, 7] = np.transpose([7, 23, 39]) # Set POLY. poly = [ @@ -517,7 +518,7 @@ def i4_sobol(dim_num, seed): # Expand this bit pattern to separate components of the logical array INCLUD. j = poly[i - 1] - includ = xp.zeros(m) + includ = np.zeros(m) for k in range(m, 0, -1): j2 = j // 2 includ[k - 1] = j != 2 * j2 @@ -531,7 +532,7 @@ def i4_sobol(dim_num, seed): for k in range(1, m + 1): l *= 2 if includ[k - 1]: - newv = xp.bitwise_xor(int(newv), int(l * v[i - 1, j - k - 1])) + newv = np.bitwise_xor(int(newv), int(l * v[i - 1, j - k - 1])) v[i - 1, j - 1] = newv # Multiply columns of V by appropriate power of 2. @@ -542,16 +543,16 @@ def i4_sobol(dim_num, seed): # RECIPD is 1/(common denominator of the elements in V). recipd = 1.0 / (2 * l) - lastq = xp.zeros(dim_num) + lastq = np.zeros(dim_num) - seed = int(xp.floor(seed)) + seed = int(np.floor(seed)) if seed < 0: seed = 0 l = 1 if seed == 0: - lastq = xp.zeros(dim_num) + lastq = np.zeros(dim_num) elif seed == seed_save + 1: # Find the position of the right-hand zero in SEED. @@ -559,12 +560,12 @@ def i4_sobol(dim_num, seed): elif seed <= seed_save: seed_save = 0 - lastq = xp.zeros(dim_num) + lastq = np.zeros(dim_num) for seed_temp in range(int(seed_save), int(seed)): l = i4_bit_lo0(seed_temp) for i in range(1, dim_num + 1): - lastq[i - 1] = xp.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) + lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) l = i4_bit_lo0(seed) @@ -572,7 +573,7 @@ def i4_sobol(dim_num, seed): for seed_temp in range(int(seed_save + 1), int(seed)): l = i4_bit_lo0(seed_temp) for i in range(1, dim_num + 1): - lastq[i - 1] = xp.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) + lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) l = i4_bit_lo0(seed) @@ -585,10 +586,10 @@ def i4_sobol(dim_num, seed): return # Calculate the new components of QUASI. 
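A short usage sketch of the public entry points, assuming the module-level API above (`i4_sobol_generate` fills its output row by row through `i4_sobol`):

    r = i4_sobol_generate(dim_num=2, n=4, skip=1)   # (4, 2) array of points in [0, 1)^2
    quasi, next_seed = i4_sobol(2, 1)               # single point plus the advanced seed

Successive rows of `r` are the low-discrepancy Sobol points; the sequence is deterministic, so `skip` only selects where in the sequence to start.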
- quasi = xp.zeros(dim_num) + quasi = np.zeros(dim_num) for i in range(1, dim_num + 1): quasi[i - 1] = lastq[i - 1] * recipd - lastq[i - 1] = xp.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) + lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1])) seed_save = seed seed += 1 @@ -638,11 +639,11 @@ def i4_uniform(a, b, seed): print("I4_UNIFORM - Fatal error!") print(" Input SEED = 0!") - seed = xp.floor(seed) + seed = np.floor(seed) a = round(a) b = round(b) - seed = xp.mod(seed, 2147483647) + seed = np.mod(seed, 2147483647) if seed < 0: seed += 2147483647 @@ -696,7 +697,7 @@ def prime_ge(n): Output, integer P, the smallest prime number that is greater than or equal to N. """ - p = max(xp.ceil(n), 2) + p = max(np.ceil(n), 2) while not is_prime(p): p += 1 @@ -720,7 +721,7 @@ def is_prime(n): return False # All primes >3 are of the form 6n+1 or 6n+5 (6n, 6n+2, 6n+4 are 2-divisible, 6n+3 is 3-divisible) p = 5 - root = int(xp.ceil(xp.sqrt(n))) + root = int(np.ceil(np.sqrt(n))) while p <= root: if n % p == 0 or n % (p + 2) == 0: return False diff --git a/src/struphy/pic/sph_eval_kernels.py b/src/struphy/pic/sph_eval_kernels.py index 4c63e0156..37414f447 100644 --- a/src/struphy/pic/sph_eval_kernels.py +++ b/src/struphy/pic/sph_eval_kernels.py @@ -297,19 +297,7 @@ def naive_evaluation_meshgrid( e2 = eta2[i, j, k] e3 = eta3[i, j, k] out[i, j, k] = naive_evaluation_kernel( - args_markers, - e1, - e2, - e3, - holes, - periodic1, - periodic2, - periodic3, - index, - kernel_type, - h1, - h2, - h3, + args_markers, e1, e2, e3, holes, periodic1, periodic2, periodic3, index, kernel_type, h1, h2, h3 ) diff --git a/src/struphy/pic/tests/test_accum_vec_H1.py b/src/struphy/pic/tests/test_accum_vec_H1.py index cb5cbb17e..f8de1b2fa 100644 --- a/src/struphy/pic/tests/test_accum_vec_H1.py +++ b/src/struphy/pic/tests/test_accum_vec_H1.py @@ -6,8 +6,7 @@ @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[2, 3, 4]]) @pytest.mark.parametrize( - "spl_kind", - [[False, False, True], [False, True, True], [True, False, True], [True, True, True]], + "spl_kind", [[False, False, True], [False, True, True], [True, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -48,7 +47,6 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): import copy - import cunumpy as xp from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI @@ -58,7 +56,7 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): from struphy.pic.accumulation import accum_kernels from struphy.pic.accumulation.particles_to_grid import AccumulatorVector from struphy.pic.particles import Particles6D - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np from struphy.utils.clone_config import CloneConfig if isinstance(MPI.COMM_WORLD, MockComm): @@ -77,7 +75,7 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): params = { "grid": {"Nel": Nel}, - "kinetic": {"test_particles": {"markers": {"Np": Np, "ppc": Np / xp.prod(Nel)}}}, + "kinetic": {"test_particles": {"markers": {"Np": Np, "ppc": Np / np.prod(Nel)}}}, } if mpi_comm is None: clone_config = None @@ -106,16 +104,17 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): print("Domain decomposition according to", derham.domain_array) # load distributed markers first and use Send/Receive to make global marker copies for the legacy routines - loading_params = 
LoadingParameters( - Np=Np, - seed=1607, - moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), - spatial="uniform", - ) + loading_params = { + "seed": 1607, + "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], + "spatial": "uniform", + } particles = Particles6D( comm_world=mpi_comm, clone_config=clone_config, + Np=Np, + bc=["periodic"] * 3, loading_params=loading_params, domain=domain, domain_decomp=domain_decomp, @@ -130,12 +129,12 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): _w0 = particles.weights print("Test weights:") - print(f"rank {mpi_rank}:", _w0.shape, xp.min(_w0), xp.max(_w0)) + print(f"rank {mpi_rank}:", _w0.shape, np.min(_w0), np.max(_w0)) _sqrtg = domain.jacobian_det(0.5, 0.5, 0.5) - assert xp.isclose(xp.min(_w0), _sqrtg) - assert xp.isclose(xp.max(_w0), _sqrtg) + assert np.isclose(np.min(_w0), _sqrtg) + assert np.isclose(np.max(_w0), _sqrtg) # mass operators mass_ops = WeightedMassOperators(derham, domain) @@ -149,31 +148,31 @@ def test_accum_poisson(Nel, p, spl_kind, mapping, num_clones, Np=1000): domain.args_domain, ) - acc() + acc(particles.vdim) # sum all MC integrals - _sum_within_clone = xp.empty(1, dtype=float) - _sum_within_clone[0] = xp.sum(acc.vectors[0].toarray()) + _sum_within_clone = np.empty(1, dtype=float) + _sum_within_clone[0] = np.sum(acc.vectors[0].toarray()) if clone_config is not None: clone_config.sub_comm.Allreduce(MPI.IN_PLACE, _sum_within_clone, op=MPI.SUM) - print(f"rank {mpi_rank}: {_sum_within_clone =}, {_sqrtg =}") + print(f"rank {mpi_rank}: {_sum_within_clone = }, {_sqrtg = }") # Check within clone - assert xp.isclose(_sum_within_clone, _sqrtg) + assert np.isclose(_sum_within_clone, _sqrtg) # Check for all clones - _sum_between_clones = xp.empty(1, dtype=float) - _sum_between_clones[0] = xp.sum(acc.vectors[0].toarray()) + _sum_between_clones = np.empty(1, dtype=float) + _sum_between_clones[0] = np.sum(acc.vectors[0].toarray()) if mpi_comm is not None: mpi_comm.Allreduce(MPI.IN_PLACE, _sum_between_clones, op=MPI.SUM) clone_config.inter_comm.Allreduce(MPI.IN_PLACE, _sqrtg, op=MPI.SUM) - print(f"rank {mpi_rank}: {_sum_between_clones =}, {_sqrtg =}") + print(f"rank {mpi_rank}: {_sum_between_clones = }, {_sqrtg = }") # Check within clone - assert xp.isclose(_sum_between_clones, _sqrtg) + assert np.isclose(_sum_between_clones, _sqrtg) if __name__ == "__main__": diff --git a/src/struphy/pic/tests/test_accumulation.py b/src/struphy/pic/tests/test_accumulation.py index ed3a41ff4..f8591ca44 100644 --- a/src/struphy/pic/tests/test_accumulation.py +++ b/src/struphy/pic/tests/test_accumulation.py @@ -6,8 +6,7 @@ @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[2, 3, 4]]) @pytest.mark.parametrize( - "spl_kind", - [[False, False, True], [False, True, False], [True, False, True], [True, True, False]], + "spl_kind", [[False, False, True], [False, True, False], [True, False, True], [True, True, False]] ) @pytest.mark.parametrize( "mapping", @@ -49,7 +48,6 @@ def test_accumulation(Nel, p, spl_kind, mapping, Np=40, verbose=False): def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): from time import time - import cunumpy as xp from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI @@ -62,7 +60,7 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): from struphy.pic.accumulation.particles_to_grid import Accumulator from struphy.pic.particles import Particles6D from struphy.pic.tests.test_pic_legacy_files.accumulation_kernels_3d import kernel_step_ph_full - 
from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np if isinstance(MPI.COMM_WORLD, MockComm): mpi_comm = None @@ -93,10 +91,12 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): print(derham.domain_array) # load distributed markers first and use Send/Receive to make global marker copies for the legacy routines - loading_params = LoadingParameters(Np=Np, seed=1607, moments=(0.0, 0.0, 0.0, 1.0, 2.0, 3.0), spatial="uniform") + loading_params = {"seed": 1607, "moments": [0.0, 0.0, 0.0, 1.0, 2.0, 3.0], "spatial": "uniform"} particles = Particles6D( comm_world=mpi_comm, + Np=Np, + bc=["periodic"] * 3, loading_params=loading_params, domain=domain, domain_decomp=domain_decomp, @@ -108,17 +108,17 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): particles.markers[ ~particles.holes, 6, - ] = xp.random.rand(particles.n_mks_loc) + ] = np.random.rand(particles.n_mks_loc) # gather all particles for legacy kernel if mpi_comm is None: - marker_shapes = xp.array([particles.markers.shape[0]]) + marker_shapes = np.array([particles.markers.shape[0]]) else: - marker_shapes = xp.zeros(mpi_size, dtype=int) - mpi_comm.Allgather(xp.array([particles.markers.shape[0]]), marker_shapes) + marker_shapes = np.zeros(mpi_size, dtype=int) + mpi_comm.Allgather(np.array([particles.markers.shape[0]]), marker_shapes) print(rank, marker_shapes) - particles_leg = xp.zeros( + particles_leg = np.zeros( (sum(marker_shapes), particles.markers.shape[1]), dtype=float, ) @@ -129,7 +129,7 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): cumulative_lengths = marker_shapes[0] for i in range(1, mpi_size): - arr_recv = xp.zeros( + arr_recv = np.zeros( (marker_shapes[i], particles.markers.shape[1]), dtype=float, ) @@ -162,10 +162,10 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): for a in range(3): Ni = SPACES.Nbase_1form[a] - vec[a] = xp.zeros((Ni[0], Ni[1], Ni[2], 3), dtype=float) + vec[a] = np.zeros((Ni[0], Ni[1], Ni[2], 3), dtype=float) for b in range(3): - mat[a][b] = xp.zeros( + mat[a][b] = np.zeros( ( Ni[0], Ni[1], @@ -187,21 +187,21 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): SPACES.T[0], SPACES.T[1], SPACES.T[2], - xp.array(SPACES.p), - xp.array(Nel), - xp.array(SPACES.NbaseN), - xp.array(SPACES.NbaseD), + np.array(SPACES.p), + np.array(Nel), + np.array(SPACES.NbaseN), + np.array(SPACES.NbaseD), particles_leg.shape[0], domain.kind_map, domain.params_numpy, domain.T[0], domain.T[1], domain.T[2], - xp.array(domain.p), - xp.array( + np.array(domain.p), + np.array( domain.Nel, ), - xp.array(domain.NbaseN), + np.array(domain.NbaseN), domain.cx, domain.cy, domain.cz, @@ -218,7 +218,7 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): ) end_time = time() - tot_time = xp.round(end_time - start_time, 3) + tot_time = np.round(end_time - start_time, 3) mat[0][0] /= Np mat[0][1] /= Np @@ -248,12 +248,10 @@ def pc_lin_mhd_6d_step_ph_full(Nel, p, spl_kind, mapping, Np, verbose=False): ) start_time = time() - ACC( - 1.0, - ) + ACC(1.0, 1.0, 0.0) end_time = time() - tot_time = xp.round(end_time - start_time, 3) + tot_time = np.round(end_time - start_time, 3) if rank == 0 and verbose: print(f"Step ph New took {tot_time} seconds.") diff --git a/src/struphy/pic/tests/test_binning.py b/src/struphy/pic/tests/test_binning.py index cda2524e7..a5457c3df 100644 --- 
a/src/struphy/pic/tests/test_binning.py +++ b/src/struphy/pic/tests/test_binning.py @@ -35,19 +35,13 @@ def test_binning_6D_full_f(mapping, show_plot=False): name and specification of the mapping """ - import cunumpy as xp import matplotlib.pyplot as plt from psydac.ddm.mpi import mpi as MPI from struphy.geometry import domains - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import Maxwellian3D from struphy.pic.particles import Particles6D - from struphy.pic.utilities import ( - BoundaryParameters, - LoadingParameters, - WeightsParameters, - ) + from struphy.utils.arrays import xp as np # Set seed seed = 1234 @@ -60,17 +54,19 @@ def test_binning_6D_full_f(mapping, show_plot=False): domain = domain_class(**mapping[1]) # create particles - bc_params = ("periodic", "periodic", "periodic") + loading_params = { + "seed": seed, + "spatial": "uniform", + } + bc_params = ["periodic", "periodic", "periodic"] # =========================================== # ===== Test Maxwellian in v1 direction ===== # =========================================== - loading_params = LoadingParameters(Np=Np, seed=seed, spatial="uniform") - boundary_params = BoundaryParameters(bc=bc_params) - particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, domain=domain, ) @@ -79,7 +75,7 @@ def test_binning_6D_full_f(mapping, show_plot=False): # test weights particles.initialize_weights() - v1_bins = xp.linspace(-5.0, 5.0, 200, endpoint=True) + v1_bins = np.linspace(-5.0, 5.0, 200, endpoint=True) dv = v1_bins[1] - v1_bins[0] binned_res, r2 = particles.binning( @@ -89,7 +85,7 @@ def test_binning_6D_full_f(mapping, show_plot=False): v1_plot = v1_bins[:-1] + dv / 2 - ana_res = 1.0 / xp.sqrt(2.0 * xp.pi) * xp.exp(-(v1_plot**2) / 2.0) + ana_res = 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-(v1_plot**2) / 2.0) if show_plot: plt.plot(v1_plot, ana_res, label="Analytical result") @@ -100,7 +96,7 @@ def test_binning_6D_full_f(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - binned_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - binned_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.02, f"Error between binned data and analytical result was {l2_error}" @@ -110,19 +106,27 @@ def test_binning_6D_full_f(mapping, show_plot=False): # test weights amp_n = 0.1 l_n = 2 - pert = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - maxwellian = Maxwellian3D(n=(1.0, pert)) + pert_params = { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n], + "amps": [amp_n], + } + } + } particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, domain=domain, - background=maxwellian, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -132,7 +136,7 @@ def test_binning_6D_full_f(mapping, show_plot=False): e1_plot = e1_bins[:-1] + de / 2 - ana_res = 1.0 + amp_n * xp.cos(2 * xp.pi * l_n * e1_plot) + ana_res = 1.0 + amp_n * np.cos(2 * np.pi * l_n * e1_plot) if show_plot: plt.plot(e1_plot, ana_res, label="Analytical result") @@ -143,46 +147,67 @@ def test_binning_6D_full_f(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - binned_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) 
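The relative L2 error recurs verbatim in every assertion of these binning tests; a hypothetical helper, not part of the patch, restating the quantity checked against the 0.02 and 0.04 tolerances:

    from struphy.utils.arrays import xp as np

    def rel_l2(ana, binned):
        # || ana - binned ||_2 / || ana ||_2 over the bin centers
        return np.sqrt(np.sum((ana - binned) ** 2)) / np.sqrt(np.sum(ana**2))

Note also that in the two-background case below, the first background's `ModesCos` entry reuses `l_n` and `amp_n` from the single-background test while the analytic reference is built from `l_n1` and `amp_n1`; the values coincide (2 and 0.1), so the assertions are unaffected.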
+ l2_error = np.sqrt(np.sum((ana_res - binned_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.02, f"Error between binned data and analytical result was {l2_error}" # ============================================================== # ===== Test cosines for two backgrounds in eta1 direction ===== # ============================================================== + loading_params = { + "seed": seed, + "spatial": "uniform", + } n1 = 0.8 n2 = 0.2 - + bckgr_params = { + "Maxwellian3D_1": { + "n": n1, + }, + "Maxwellian3D_2": { + "n": n2, + "vth1": 0.5, + "u1": 4.5, + }, + } # test weights amp_n1 = 0.1 amp_n2 = 0.1 l_n1 = 2 l_n2 = 4 - - pert_1 = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - pert_2 = perturbations.ModesCos(ls=(l_n2,), amps=(amp_n2,)) - maxw_1 = Maxwellian3D(n=(n1, pert_1)) - maxw_2 = Maxwellian3D(n=(n2, pert_2), u1=(4.5, None), vth1=(0.5, None)) - background = maxw_1 + maxw_2 - - # adapt s0 for importance sampling - loading_params = LoadingParameters( - Np=Np, - seed=seed, - spatial="uniform", - moments=(2.5, 0, 0, 3, 1, 1), - ) + pert_params = { + "Maxwellian3D_1": { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n], + "amps": [amp_n], + } + } + }, + "Maxwellian3D_2": { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n2], + "amps": [amp_n2], + } + } + }, + } particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, domain=domain, - background=background, + bckgr_params=bckgr_params, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -192,28 +217,29 @@ def test_binning_6D_full_f(mapping, show_plot=False): e1_plot = e1_bins[:-1] + de / 2 - ana_res = n1 + amp_n1 * xp.cos(2 * xp.pi * l_n1 * e1_plot) + n2 + amp_n2 * xp.cos(2 * xp.pi * l_n2 * e1_plot) + ana_res = n1 + amp_n1 * np.cos(2 * np.pi * l_n1 * e1_plot) + n2 + amp_n2 * np.cos(2 * np.pi * l_n2 * e1_plot) # Compare s0 and the sum of two Maxwellians if show_plot: - s0 = Maxwellian3D( - n=(1.0, None), - u1=(particles.loading_params.moments[0], None), - u2=(particles.loading_params.moments[1], None), - u3=(particles.loading_params.moments[2], None), - vth1=(particles.loading_params.moments[3], None), - vth2=(particles.loading_params.moments[4], None), - vth3=(particles.loading_params.moments[5], None), - ) - - v1 = xp.linspace(-10.0, 10.0, 400) - phase_space = xp.meshgrid( - xp.array([0.0]), - xp.array([0.0]), - xp.array([0.0]), + s0_dict = { + "n": 1.0, + "u1": particles.loading_params["moments"][0], + "u2": particles.loading_params["moments"][1], + "u3": particles.loading_params["moments"][2], + "vth1": particles.loading_params["moments"][3], + "vth2": particles.loading_params["moments"][4], + "vth3": particles.loading_params["moments"][5], + } + s0 = Maxwellian3D(maxw_params=s0_dict) + + v1 = np.linspace(-10.0, 10.0, 400) + phase_space = np.meshgrid( + np.array([0.0]), + np.array([0.0]), + np.array([0.0]), v1, - xp.array([0.0]), - xp.array([0.0]), + np.array([0.0]), + np.array([0.0]), ) s0_vals = s0(*phase_space).squeeze() @@ -235,7 +261,7 @@ def test_binning_6D_full_f(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - binned_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - binned_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 
0.04, f"Error between binned data and analytical result was {l2_error}" @@ -268,19 +294,13 @@ def test_binning_6D_delta_f(mapping, show_plot=False): name and specification of the mapping """ - import cunumpy as xp import matplotlib.pyplot as plt from psydac.ddm.mpi import mpi as MPI from struphy.geometry import domains - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import Maxwellian3D from struphy.pic.particles import DeltaFParticles6D - from struphy.pic.utilities import ( - BoundaryParameters, - LoadingParameters, - WeightsParameters, - ) + from struphy.utils.arrays import xp as np # Set seed seed = 1234 @@ -293,30 +313,39 @@ def test_binning_6D_delta_f(mapping, show_plot=False): domain = domain_class(**mapping[1]) # create particles - bc_params = ("periodic", "periodic", "periodic") + loading_params = { + "seed": seed, + "spatial": "uniform", + } + bc_params = ["periodic", "periodic", "periodic"] # ========================================= # ===== Test cosine in eta1 direction ===== # ========================================= - loading_params = LoadingParameters(Np=Np, seed=seed, spatial="uniform") - boundary_params = BoundaryParameters(bc=bc_params) - # test weights amp_n = 0.1 l_n = 2 - pert = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - background = Maxwellian3D(n=(1.0, pert)) + pert_params = { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n], + "amps": [amp_n], + }, + } + } particles = DeltaFParticles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, domain=domain, - background=background, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -326,7 +355,7 @@ def test_binning_6D_delta_f(mapping, show_plot=False): e1_plot = e1_bins[:-1] + de / 2 - ana_res = amp_n * xp.cos(2 * xp.pi * l_n * e1_plot) + ana_res = amp_n * np.cos(2 * np.pi * l_n * e1_plot) if show_plot: plt.plot(e1_plot, ana_res, label="Analytical result") @@ -337,46 +366,69 @@ def test_binning_6D_delta_f(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - binned_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - binned_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.02, f"Error between binned data and analytical result was {l2_error}" # ============================================================== # ===== Test cosines for two backgrounds in eta1 direction ===== # ============================================================== + loading_params = { + "seed": seed, + "spatial": "uniform", + } n1 = 0.8 n2 = 0.2 - + bckgr_params = { + "Maxwellian3D_1": { + "n": n1, + }, + "Maxwellian3D_2": { + "n": n2, + "vth1": 0.5, + "u1": 4.5, + }, + } # test weights amp_n1 = 0.1 amp_n2 = 0.1 l_n1 = 2 l_n2 = 4 - - pert_1 = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - pert_2 = perturbations.ModesCos(ls=(l_n2,), amps=(amp_n2,)) - maxw_1 = Maxwellian3D(n=(n1, pert_1)) - maxw_2 = Maxwellian3D(n=(n2, pert_2), u1=(4.5, None), vth1=(0.5, None)) - background = maxw_1 + maxw_2 - - # adapt s0 for importance sampling - loading_params = LoadingParameters( - Np=Np, - seed=seed, - spatial="uniform", - moments=(2.5, 0, 0, 2, 1, 1), - ) + pert_params = { + "Maxwellian3D_1": { + "use_background_n": False, + "n": { + "ModesCos": { + "given_in_basis": "0", 
+ "ls": [l_n1], + "amps": [amp_n1], + } + }, + }, + "Maxwellian3D_2": { + "use_background_n": True, + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n2], + "amps": [amp_n2], + } + }, + }, + } particles = DeltaFParticles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, domain=domain, - background=background, + bckgr_params=bckgr_params, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -386,28 +438,29 @@ def test_binning_6D_delta_f(mapping, show_plot=False): e1_plot = e1_bins[:-1] + de / 2 - ana_res = amp_n1 * xp.cos(2 * xp.pi * l_n1 * e1_plot) + amp_n2 * xp.cos(2 * xp.pi * l_n2 * e1_plot) + ana_res = amp_n1 * np.cos(2 * np.pi * l_n1 * e1_plot) + n2 + amp_n2 * np.cos(2 * np.pi * l_n2 * e1_plot) # Compare s0 and the sum of two Maxwellians if show_plot: - s0 = Maxwellian3D( - n=(1.0, None), - u1=(particles.loading_params.moments[0], None), - u2=(particles.loading_params.moments[1], None), - u3=(particles.loading_params.moments[2], None), - vth1=(particles.loading_params.moments[3], None), - vth2=(particles.loading_params.moments[4], None), - vth3=(particles.loading_params.moments[5], None), - ) - - v1 = xp.linspace(-10.0, 10.0, 400) - phase_space = xp.meshgrid( - xp.array([0.0]), - xp.array([0.0]), - xp.array([0.0]), + s0_dict = { + "n": 1.0, + "u1": particles.loading_params["moments"][0], + "u2": particles.loading_params["moments"][1], + "u3": particles.loading_params["moments"][2], + "vth1": particles.loading_params["moments"][3], + "vth2": particles.loading_params["moments"][4], + "vth3": particles.loading_params["moments"][5], + } + s0 = Maxwellian3D(maxw_params=s0_dict) + + v1 = np.linspace(-10.0, 10.0, 400) + phase_space = np.meshgrid( + np.array([0.0]), + np.array([0.0]), + np.array([0.0]), v1, - xp.array([0.0]), - xp.array([0.0]), + np.array([0.0]), + np.array([0.0]), ) s0_vals = s0(*phase_space).squeeze() @@ -429,7 +482,7 @@ def test_binning_6D_delta_f(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - binned_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - binned_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.04, f"Error between binned data and analytical result was {l2_error}" @@ -464,20 +517,14 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): name and specification of the mapping """ - import cunumpy as xp import matplotlib.pyplot as plt from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI from struphy.geometry import domains - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import Maxwellian3D from struphy.pic.particles import Particles6D - from struphy.pic.utilities import ( - BoundaryParameters, - LoadingParameters, - WeightsParameters, - ) + from struphy.utils.arrays import xp as np # Set seed seed = 1234 @@ -500,17 +547,19 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): rank = comm.Get_rank() # create particles - bc_params = ("periodic", "periodic", "periodic") + loading_params = { + "seed": seed, + "spatial": "uniform", + } + bc_params = ["periodic", "periodic", "periodic"] # =========================================== # ===== Test Maxwellian in v1 direction ===== # =========================================== - loading_params = 
LoadingParameters(Np=Np, seed=seed, spatial="uniform") - boundary_params = BoundaryParameters(bc=bc_params) - particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, comm_world=comm, domain=domain, ) @@ -519,7 +568,7 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): # test weights particles.initialize_weights() - v1_bins = xp.linspace(-5.0, 5.0, 200, endpoint=True) + v1_bins = np.linspace(-5.0, 5.0, 200, endpoint=True) dv = v1_bins[1] - v1_bins[0] binned_res, r2 = particles.binning( @@ -531,13 +580,13 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): if comm is None: mpi_res = binned_res else: - mpi_res = xp.zeros_like(binned_res) + mpi_res = np.zeros_like(binned_res) comm.Allreduce(binned_res, mpi_res, op=MPI.SUM) comm.Barrier() v1_plot = v1_bins[:-1] + dv / 2 - ana_res = 1.0 / xp.sqrt(2.0 * xp.pi) * xp.exp(-(v1_plot**2) / 2.0) + ana_res = 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-(v1_plot**2) / 2.0) if show_plot and rank == 0: plt.plot(v1_plot, ana_res, label="Analytical result") @@ -548,7 +597,7 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - mpi_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - mpi_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.03, f"Error between binned data and analytical result was {l2_error}" @@ -558,20 +607,28 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): # test weights amp_n = 0.1 l_n = 2 - pert = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - maxwellian = Maxwellian3D(n=(1.0, pert)) + pert_params = { + "n": { + "ModesCos": { + "given_in_basis": "0", + "ls": [l_n], + "amps": [amp_n], + } + } + } particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, comm_world=comm, domain=domain, - background=maxwellian, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -583,13 +640,13 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): if comm is None: mpi_res = binned_res else: - mpi_res = xp.zeros_like(binned_res) + mpi_res = np.zeros_like(binned_res) comm.Allreduce(binned_res, mpi_res, op=MPI.SUM) comm.Barrier() e1_plot = e1_bins[:-1] + de / 2 - ana_res = 1.0 + amp_n * xp.cos(2 * xp.pi * l_n * e1_plot) + ana_res = 1.0 + amp_n * np.cos(2 * np.pi * l_n * e1_plot) if show_plot and rank == 0: plt.plot(e1_plot, ana_res, label="Analytical result") @@ -600,13 +657,17 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - mpi_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - mpi_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.03, f"Error between binned data and analytical result was {l2_error}" # ============================================================== # ===== Test cosines for two backgrounds in eta1 direction ===== # ============================================================== + loading_params = { + "seed": seed, + "spatial": "uniform", + } n1 = 0.8 n2 = 0.2 bckgr_params = { @@ -631,8 +692,8 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): "given_in_basis": "0", "ls": [l_n1], "amps": [amp_n1], - }, - }, + } + } }, "Maxwellian3D_2": { "n": { @@ 
-640,35 +701,24 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): "given_in_basis": "0", "ls": [l_n2], "amps": [amp_n2], - }, - }, + } + } }, } - pert_1 = perturbations.ModesCos(ls=(l_n1,), amps=(amp_n1,)) - pert_2 = perturbations.ModesCos(ls=(l_n2,), amps=(amp_n2,)) - maxw_1 = Maxwellian3D(n=(n1, pert_1)) - maxw_2 = Maxwellian3D(n=(n2, pert_2), u1=(4.5, None), vth1=(0.5, None)) - background = maxw_1 + maxw_2 - - # adapt s0 for importance sampling - loading_params = LoadingParameters( - Np=Np, - seed=seed, - spatial="uniform", - moments=(2.5, 0, 0, 2, 1, 1), - ) particles = Particles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, comm_world=comm, domain=domain, - background=background, + bckgr_params=bckgr_params, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -680,34 +730,35 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): if comm is None: mpi_res = binned_res else: - mpi_res = xp.zeros_like(binned_res) + mpi_res = np.zeros_like(binned_res) comm.Allreduce(binned_res, mpi_res, op=MPI.SUM) comm.Barrier() e1_plot = e1_bins[:-1] + de / 2 - ana_res = n1 + amp_n1 * xp.cos(2 * xp.pi * l_n1 * e1_plot) + n2 + amp_n2 * xp.cos(2 * xp.pi * l_n2 * e1_plot) + ana_res = n1 + amp_n1 * np.cos(2 * np.pi * l_n1 * e1_plot) + n2 + amp_n2 * np.cos(2 * np.pi * l_n2 * e1_plot) # Compare s0 and the sum of two Maxwellians if show_plot and rank == 0: - s0 = Maxwellian3D( - n=(1.0, None), - u1=(particles.loading_params.moments[0], None), - u2=(particles.loading_params.moments[1], None), - u3=(particles.loading_params.moments[2], None), - vth1=(particles.loading_params.moments[3], None), - vth2=(particles.loading_params.moments[4], None), - vth3=(particles.loading_params.moments[5], None), - ) - - v1 = xp.linspace(-10.0, 10.0, 400) - phase_space = xp.meshgrid( - xp.array([0.0]), - xp.array([0.0]), - xp.array([0.0]), + s0_dict = { + "n": 1.0, + "u1": particles.loading_params["moments"][0], + "u2": particles.loading_params["moments"][1], + "u3": particles.loading_params["moments"][2], + "vth1": particles.loading_params["moments"][3], + "vth2": particles.loading_params["moments"][4], + "vth3": particles.loading_params["moments"][5], + } + s0 = Maxwellian3D(maxw_params=s0_dict) + + v1 = np.linspace(-10.0, 10.0, 400) + phase_space = np.meshgrid( + np.array([0.0]), + np.array([0.0]), + np.array([0.0]), v1, - xp.array([0.0]), - xp.array([0.0]), + np.array([0.0]), + np.array([0.0]), ) s0_vals = s0(*phase_space).squeeze() @@ -729,7 +780,7 @@ def test_binning_6D_full_f_mpi(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - mpi_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - mpi_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.04, f"Error between binned data and analytical result was {l2_error}" @@ -761,20 +812,14 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): name and specification of the mapping """ - import cunumpy as xp import matplotlib.pyplot as plt from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI from struphy.geometry import domains - from struphy.initial import perturbations from struphy.kinetic_background.maxwellians import Maxwellian3D from struphy.pic.particles import DeltaFParticles6D - from 
struphy.pic.utilities import ( - BoundaryParameters, - LoadingParameters, - WeightsParameters, - ) + from struphy.utils.arrays import xp as np # Set seed seed = 1234 @@ -797,14 +842,15 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): rank = comm.Get_rank() # create particles - bc_params = ("periodic", "periodic", "periodic") + loading_params = { + "seed": seed, + "spatial": "uniform", + } + bc_params = ["periodic", "periodic", "periodic"] # ========================================= # ===== Test cosine in eta1 direction ===== # ========================================= - loading_params = LoadingParameters(Np=Np, seed=seed, spatial="uniform") - boundary_params = BoundaryParameters(bc=bc_params) - # test weights amp_n = 0.1 l_n = 2 @@ -814,23 +860,22 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): "given_in_basis": "0", "ls": [l_n], "amps": [amp_n], - }, - }, + } + } } - pert = perturbations.ModesCos(ls=(l_n,), amps=(amp_n,)) - background = Maxwellian3D(n=(1.0, pert)) particles = DeltaFParticles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, comm_world=comm, domain=domain, - background=background, + pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -842,13 +887,13 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): if comm is None: mpi_res = binned_res else: - mpi_res = xp.zeros_like(binned_res) + mpi_res = np.zeros_like(binned_res) comm.Allreduce(binned_res, mpi_res, op=MPI.SUM) comm.Barrier() e1_plot = e1_bins[:-1] + de / 2 - ana_res = amp_n * xp.cos(2 * xp.pi * l_n * e1_plot) + ana_res = amp_n * np.cos(2 * np.pi * l_n * e1_plot) if show_plot and rank == 0: plt.plot(e1_plot, ana_res, label="Analytical result") @@ -859,13 +904,17 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - mpi_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - mpi_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.02, f"Error between binned data and analytical result was {l2_error}" # ============================================================== # ===== Test cosines for two backgrounds in eta1 direction ===== # ============================================================== + loading_params = { + "seed": seed, + "spatial": "uniform", + } n1 = 0.8 n2 = 0.2 bckgr_params = { @@ -891,7 +940,7 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): "given_in_basis": "0", "ls": [l_n1], "amps": [amp_n1], - }, + } }, }, "Maxwellian3D_2": { @@ -901,35 +950,24 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): "given_in_basis": "0", "ls": [l_n2], "amps": [amp_n2], - }, + } }, }, } - pert_1 = perturbations.ModesCos(ls=(l_n1,), amps=(amp_n1,)) - pert_2 = perturbations.ModesCos(ls=(l_n2,), amps=(amp_n2,)) - maxw_1 = Maxwellian3D(n=(n1, pert_1)) - maxw_2 = Maxwellian3D(n=(n2, pert_2), u1=(4.5, None), vth1=(0.5, None)) - background = maxw_1 + maxw_2 - - # adapt s0 for importance sampling - loading_params = LoadingParameters( - Np=Np, - seed=seed, - spatial="uniform", - moments=(2.5, 0, 0, 2, 1, 1), - ) particles = DeltaFParticles6D( + Np=Np, + bc=bc_params, loading_params=loading_params, - boundary_params=boundary_params, comm_world=comm, domain=domain, - background=background, + bckgr_params=bckgr_params, 
+ pert_params=pert_params, ) particles.draw_markers() particles.initialize_weights() - e1_bins = xp.linspace(0.0, 1.0, 200, endpoint=True) + e1_bins = np.linspace(0.0, 1.0, 200, endpoint=True) de = e1_bins[1] - e1_bins[0] binned_res, r2 = particles.binning( @@ -941,34 +979,35 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): if comm is None: mpi_res = binned_res else: - mpi_res = xp.zeros_like(binned_res) + mpi_res = np.zeros_like(binned_res) comm.Allreduce(binned_res, mpi_res, op=MPI.SUM) comm.Barrier() e1_plot = e1_bins[:-1] + de / 2 - ana_res = amp_n1 * xp.cos(2 * xp.pi * l_n1 * e1_plot) + amp_n2 * xp.cos(2 * xp.pi * l_n2 * e1_plot) + ana_res = amp_n1 * np.cos(2 * np.pi * l_n1 * e1_plot) + n2 + amp_n2 * np.cos(2 * np.pi * l_n2 * e1_plot) # Compare s0 and the sum of two Maxwellians if show_plot and rank == 0: - s0 = Maxwellian3D( - n=(1.0, None), - u1=(particles.loading_params.moments[0], None), - u2=(particles.loading_params.moments[1], None), - u3=(particles.loading_params.moments[2], None), - vth1=(particles.loading_params.moments[3], None), - vth2=(particles.loading_params.moments[4], None), - vth3=(particles.loading_params.moments[5], None), - ) - - v1 = xp.linspace(-10.0, 10.0, 400) - phase_space = xp.meshgrid( - xp.array([0.0]), - xp.array([0.0]), - xp.array([0.0]), + s0_dict = { + "n": 1.0, + "u1": particles.loading_params["moments"][0], + "u2": particles.loading_params["moments"][1], + "u3": particles.loading_params["moments"][2], + "vth1": particles.loading_params["moments"][3], + "vth2": particles.loading_params["moments"][4], + "vth3": particles.loading_params["moments"][5], + } + s0 = Maxwellian3D(maxw_params=s0_dict) + + v1 = np.linspace(-10.0, 10.0, 400) + phase_space = np.meshgrid( + np.array([0.0]), + np.array([0.0]), + np.array([0.0]), v1, - xp.array([0.0]), - xp.array([0.0]), + np.array([0.0]), + np.array([0.0]), ) s0_vals = s0(*phase_space).squeeze() @@ -990,7 +1029,7 @@ def test_binning_6D_delta_f_mpi(mapping, show_plot=False): plt.legend() plt.show() - l2_error = xp.sqrt(xp.sum((ana_res - mpi_res) ** 2)) / xp.sqrt(xp.sum((ana_res) ** 2)) + l2_error = np.sqrt(np.sum((ana_res - mpi_res) ** 2)) / np.sqrt(np.sum((ana_res) ** 2)) assert l2_error <= 0.04, f"Error between binned data and analytical result was {l2_error}" diff --git a/src/struphy/pic/tests/test_draw_parallel.py b/src/struphy/pic/tests/test_draw_parallel.py index cf95f4dc7..5c1232465 100644 --- a/src/struphy/pic/tests/test_draw_parallel.py +++ b/src/struphy/pic/tests/test_draw_parallel.py @@ -35,13 +35,12 @@ def test_draw(Nel, p, spl_kind, mapping, ppc=10): """Asserts whether all particles are on the correct process after `particles.mpi_sort_markers()`.""" - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.geometry import domains from struphy.pic.particles import Particles6D - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -65,16 +64,17 @@ def test_draw(Nel, p, spl_kind, mapping, ppc=10): print(derham.domain_array) # create particles - loading_params = LoadingParameters( - ppc=ppc, - seed=seed, - moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), - spatial="uniform", - ) + loading_params = { + "seed": seed, + "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], + "spatial": "uniform", + } particles = Particles6D( comm_world=comm, + ppc=ppc, domain_decomp=domain_decomp, + bc=["periodic", "periodic", "periodic"], 
loading_params=loading_params, domain=domain, ) @@ -85,7 +85,7 @@ def test_draw(Nel, p, spl_kind, mapping, ppc=10): particles.initialize_weights() _w0 = particles.weights print("Test weights:") - print(f"rank {rank}:", _w0.shape, xp.min(_w0), xp.max(_w0)) + print(f"rank {rank}:", _w0.shape, np.min(_w0), np.max(_w0)) comm.Barrier() print("Number of particles w/wo holes on each process before sorting : ") @@ -106,17 +106,17 @@ def test_draw(Nel, p, spl_kind, mapping, ppc=10): print("Rank", rank, ":", particles.n_mks_loc, particles.markers.shape[0]) # are all markers in the correct domain? - conds = xp.logical_and( + conds = np.logical_and( particles.markers[:, :3] > derham.domain_array[rank, 0::3], particles.markers[:, :3] < derham.domain_array[rank, 1::3], ) holes = particles.markers[:, 0] == -1.0 - stay = xp.all(conds, axis=1) + stay = np.all(conds, axis=1) - error_mks = particles.markers[xp.logical_and(~stay, ~holes)] + error_mks = particles.markers[np.logical_and(~stay, ~holes)] assert error_mks.size == 0, ( - f"rank {rank} | markers not on correct process: {xp.nonzero(xp.logical_and(~stay, ~holes))} \n corresponding positions:\n {error_mks[:, :3]}" + f"rank {rank} | markers not on correct process: {np.nonzero(np.logical_and(~stay, ~holes))} \n corresponding positions:\n {error_mks[:, :3]}" ) diff --git a/src/struphy/pic/tests/test_mat_vec_filler.py b/src/struphy/pic/tests/test_mat_vec_filler.py index 073d52ae7..7edbf7278 100644 --- a/src/struphy/pic/tests/test_mat_vec_filler.py +++ b/src/struphy/pic/tests/test_mat_vec_filler.py @@ -1,6 +1,7 @@ -import cunumpy as xp import pytest +from struphy.utils.arrays import xp as np + @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[1, 2, 3]]) @@ -32,12 +33,12 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): print(f"\nNel={Nel}, p={p}, spl_kind={spl_kind}\n") # DR attributes - pn = xp.array(DR.p) + pn = np.array(DR.p) tn1, tn2, tn3 = DR.Vh_fem["0"].knots starts1 = {} - starts1["v0"] = xp.array(DR.Vh["0"].starts) + starts1["v0"] = np.array(DR.Vh["0"].starts) comm.Barrier() sleep(0.02 * (rank + 1)) @@ -70,11 +71,8 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): for j in range(3): mat["v1"][-1] += [ StencilMatrix( - DR.Vh["1"].spaces[i], - DR.Vh["1"].spaces[j], - backend=PSYDAC_BACKEND_GPYCCEL, - precompiled=True, - )._data, + DR.Vh["1"].spaces[i], DR.Vh["1"].spaces[j], backend=PSYDAC_BACKEND_GPYCCEL, precompiled=True + )._data ] vec["v1"] = [] @@ -87,11 +85,8 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): for j in range(3): mat["v2"][-1] += [ StencilMatrix( - DR.Vh["2"].spaces[i], - DR.Vh["2"].spaces[j], - backend=PSYDAC_BACKEND_GPYCCEL, - precompiled=True, - )._data, + DR.Vh["2"].spaces[i], DR.Vh["2"].spaces[j], backend=PSYDAC_BACKEND_GPYCCEL, precompiled=True + )._data ] vec["v2"] = [] @@ -99,14 +94,14 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): vec["v2"] += [StencilVector(DR.Vh["2"].spaces[i])._data] # Some filling for testing - fill_mat = xp.reshape(xp.arange(9, dtype=float), (3, 3)) + 1.0 - fill_vec = xp.arange(3, dtype=float) + 1.0 + fill_mat = np.reshape(np.arange(9, dtype=float), (3, 3)) + 1.0 + fill_vec = np.arange(3, dtype=float) + 1.0 # Random points in domain of process (VERY IMPORTANT to be in the right domain, otherwise NON-TRACKED errors occur in filler_kernels !!) 
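The warning above is the one hard constraint of this setup: each rank owns a slab of the unit cube, recorded in DR.domain_array[rank], and the filler kernels are only valid for markers inside that slab. The lines below rescale uniform samples onto the local bounds; a standalone sketch, assuming the (start, end, n) per-direction layout implied by the indices 0/1, 3/4 and 6/7 used there:

import numpy as np

def draw_local_points(dom, n_markers, seed=None):
    # Map uniform samples in [0, 1) onto this rank's subdomain; dom is
    # assumed to hold (start, end, n) per direction, so the bounds sit
    # at indices (0, 1), (3, 4) and (6, 7).
    rng = np.random.default_rng(seed)
    eta1 = rng.random(n_markers) * (dom[1] - dom[0]) + dom[0]
    eta2 = rng.random(n_markers) * (dom[4] - dom[3]) + dom[3]
    eta3 = rng.random(n_markers) * (dom[7] - dom[6]) + dom[6]
    return eta1, eta2, eta3

# Example: a rank owning eta1 in [0.0, 0.5) and eta3 in [0.5, 1.0).
dom = [0.0, 0.5, 4, 0.0, 1.0, 8, 0.5, 1.0, 4]
e1, e2, e3 = draw_local_points(dom, 100, seed=0)
assert e1.min() >= 0.0 and e1.max() < 0.5
assert e3.min() >= 0.5 and e3.max() < 1.0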
dom = DR.domain_array[rank] - eta1s = xp.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] - eta2s = xp.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] - eta3s = xp.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] + eta1s = np.random.rand(n_markers) * (dom[1] - dom[0]) + dom[0] + eta2s = np.random.rand(n_markers) * (dom[4] - dom[3]) + dom[3] + eta3s = np.random.rand(n_markers) * (dom[7] - dom[6]) + dom[6] for eta1, eta2, eta3 in zip(eta1s, eta2s, eta3s): comm.Barrier() @@ -123,13 +118,13 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): span3 = bsp.find_span(tn3, DR.p[2], eta3) # non-zero spline values at eta - bn1 = xp.empty(DR.p[0] + 1, dtype=float) - bn2 = xp.empty(DR.p[1] + 1, dtype=float) - bn3 = xp.empty(DR.p[2] + 1, dtype=float) + bn1 = np.empty(DR.p[0] + 1, dtype=float) + bn2 = np.empty(DR.p[1] + 1, dtype=float) + bn3 = np.empty(DR.p[2] + 1, dtype=float) - bd1 = xp.empty(DR.p[0], dtype=float) - bd2 = xp.empty(DR.p[1], dtype=float) - bd3 = xp.empty(DR.p[2], dtype=float) + bd1 = np.empty(DR.p[0], dtype=float) + bd2 = np.empty(DR.p[1], dtype=float) + bd3 = np.empty(DR.p[2], dtype=float) bsp.b_d_splines_slim(tn1, DR.p[0], eta1, span1, bn1, bd1) bsp.b_d_splines_slim(tn2, DR.p[1], eta2, span2, bn2, bd2) @@ -141,9 +136,9 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): ie3 = span3 - pn[2] # global indices of non-vanishing B- and D-splines (no modulo) - glob_n1 = xp.arange(ie1, ie1 + pn[0] + 1) - glob_n2 = xp.arange(ie2, ie2 + pn[1] + 1) - glob_n3 = xp.arange(ie3, ie3 + pn[2] + 1) + glob_n1 = np.arange(ie1, ie1 + pn[0] + 1) + glob_n2 = np.arange(ie2, ie2 + pn[1] + 1) + glob_n3 = np.arange(ie3, ie3 + pn[2] + 1) glob_d1 = glob_n1[:-1] glob_d2 = glob_n2[:-1] @@ -169,10 +164,10 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): # local column indices in _data of non-vanishing B- and D-splines, as sets for comparison cols = [{}, {}, {}] for n in range(3): - cols[n]["NN"] = set(xp.arange(2 * pn[n] + 1)) - cols[n]["ND"] = set(xp.arange(2 * pn[n])) - cols[n]["DN"] = set(xp.arange(1, 2 * pn[n] + 1)) - cols[n]["DD"] = set(xp.arange(1, 2 * pn[n])) + cols[n]["NN"] = set(np.arange(2 * pn[n] + 1)) + cols[n]["ND"] = set(np.arange(2 * pn[n])) + cols[n]["DN"] = set(np.arange(1, 2 * pn[n] + 1)) + cols[n]["DD"] = set(np.arange(1, 2 * pn[n])) # testing vector-valued spaces spaces_vector = ["v1", "v2"] @@ -219,13 +214,7 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): for n, ij in enumerate(ind_pairs): assert_mat( - args[n], - rows, - cols, - basis[space][ij[0]], - basis[space][ij[1]], - rank, - verbose=False, + args[n], rows, cols, basis[space][ij[0]], basis[space][ij[1]], rank, verbose=False ) # assertion test of mat if mv == "m_v": for i in range(3): @@ -242,13 +231,7 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): for n, ij in enumerate(ind_pairs): assert_mat( - args[n], - rows, - cols, - basis[space][ij[0]], - basis[space][ij[1]], - rank, - verbose=False, + args[n], rows, cols, basis[space][ij[0]], basis[space][ij[1]], rank, verbose=False ) # assertion test of mat if mv == "m_v": for i in range(3): @@ -261,14 +244,14 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): # testing salar spaces if rank == 0: - print("\nTesting mat_fill_b_v0 ...") + print(f"\nTesting mat_fill_b_v0 ...") ptomat.mat_fill_b_v0(DR.args_derham, eta1, eta2, eta3, mat["v0"], fill_mat[0, 0]) assert_mat(mat["v0"], rows, cols, basis["v0"], basis["v0"], rank) # assertion test of mat count += 1 comm.Barrier() if rank == 0: 
- print("\nTesting m_v_fill_b_v0 ...") + print(f"\nTesting m_v_fill_b_v0 ...") ptomat.m_v_fill_b_v0(DR.args_derham, eta1, eta2, eta3, mat["v0"], fill_mat[0, 0], vec["v0"], fill_vec[0]) assert_mat(mat["v0"], rows, cols, basis["v0"], basis["v0"], rank) # assertion test of mat assert_vec(vec["v0"], rows, basis["v0"], rank) # assertion test of vec @@ -276,14 +259,14 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): comm.Barrier() if rank == 0: - print("\nTesting mat_fill_b_v3 ...") + print(f"\nTesting mat_fill_b_v3 ...") ptomat.mat_fill_b_v3(DR.args_derham, eta1, eta2, eta3, mat["v3"], fill_mat[0, 0]) assert_mat(mat["v3"], rows, cols, basis["v3"], basis["v3"], rank) # assertion test of mat count += 1 comm.Barrier() if rank == 0: - print("\nTesting m_v_fill_b_v3 ...") + print(f"\nTesting m_v_fill_b_v3 ...") ptomat.m_v_fill_b_v3(DR.args_derham, eta1, eta2, eta3, mat["v3"], fill_mat[0, 0], vec["v3"], fill_vec[0]) assert_mat(mat["v3"], rows, cols, basis["v3"], basis["v3"], rank) # assertion test of mat assert_vec(vec["v3"], rows, basis["v3"], rank) # assertion test of vec @@ -291,14 +274,14 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): comm.Barrier() if rank == 0: - print("\nTesting mat_fill_v0 ...") + print(f"\nTesting mat_fill_v0 ...") ptomat.mat_fill_v0(DR.args_derham, span1, span2, span3, mat["v0"], fill_mat[0, 0]) assert_mat(mat["v0"], rows, cols, basis["v0"], basis["v0"], rank) # assertion test of mat count += 1 comm.Barrier() if rank == 0: - print("\nTesting m_v_fill_v0 ...") + print(f"\nTesting m_v_fill_v0 ...") ptomat.m_v_fill_v0(DR.args_derham, span1, span2, span3, mat["v0"], fill_mat[0, 0], vec["v0"], fill_vec[0]) assert_mat(mat["v0"], rows, cols, basis["v0"], basis["v0"], rank) # assertion test of mat assert_vec(vec["v0"], rows, basis["v0"], rank) # assertion test of vec @@ -306,14 +289,14 @@ def test_particle_to_mat_kernels(Nel, p, spl_kind, n_markers=1): comm.Barrier() if rank == 0: - print("\nTesting mat_fill_v3 ...") + print(f"\nTesting mat_fill_v3 ...") ptomat.mat_fill_v3(DR.args_derham, span1, span2, span3, mat["v3"], fill_mat[0, 0]) assert_mat(mat["v3"], rows, cols, basis["v3"], basis["v3"], rank) # assertion test of mat count += 1 comm.Barrier() if rank == 0: - print("\nTesting m_v_fill_v3 ...") + print(f"\nTesting m_v_fill_v3 ...") ptomat.m_v_fill_v3(DR.args_derham, span1, span2, span3, mat["v3"], fill_mat[0, 0], vec["v3"], fill_vec[0]) assert_mat(mat["v3"], rows, cols, basis["v3"], basis["v3"], rank) # assertion test of mat assert_vec(vec["v3"], rows, basis["v3"], rank) # assertion test of vec @@ -354,23 +337,23 @@ def assert_mat(mat, rows, cols, row_str, col_str, rank, verbose=False): """ assert len(mat.shape) == 6 # assert non NaN - assert ~xp.isnan(mat).any() + assert ~np.isnan(mat).any() atol = 1e-14 if verbose: print(f"\n({row_str}) ({col_str})") - print(f"rank {rank} | ind_row1: {set(xp.where(mat > atol)[0])}") - print(f"rank {rank} | ind_row2: {set(xp.where(mat > atol)[1])}") - print(f"rank {rank} | ind_row3: {set(xp.where(mat > atol)[2])}") - print(f"rank {rank} | ind_col1: {set(xp.where(mat > atol)[3])}") - print(f"rank {rank} | ind_col2: {set(xp.where(mat > atol)[4])}") - print(f"rank {rank} | ind_col3: {set(xp.where(mat > atol)[5])}") + print(f"rank {rank} | ind_row1: {set(np.where(mat > atol)[0])}") + print(f"rank {rank} | ind_row2: {set(np.where(mat > atol)[1])}") + print(f"rank {rank} | ind_row3: {set(np.where(mat > atol)[2])}") + print(f"rank {rank} | ind_col1: {set(np.where(mat > atol)[3])}") + print(f"rank {rank} | 
ind_col2: {set(np.where(mat > atol)[4])}") + print(f"rank {rank} | ind_col3: {set(np.where(mat > atol)[5])}") # check if correct indices are non-zero for n, (r, c) in enumerate(zip(row_str, col_str)): - assert set(xp.where(mat > atol)[n]) == rows[n][r] - assert set(xp.where(mat > atol)[n + 3]) == cols[n][r + c] + assert set(np.where(mat > atol)[n]) == rows[n][r] + assert set(np.where(mat > atol)[n + 3]) == cols[n][r + c] # Set matrix back to zero mat[:, :] = 0.0 @@ -401,19 +384,19 @@ def assert_vec(vec, rows, row_str, rank, verbose=False): """ assert len(vec.shape) == 3 # assert non Nan - assert ~xp.isnan(vec).any() + assert ~np.isnan(vec).any() atol = 1e-14 if verbose: print(f"\n({row_str})") - print(f"rank {rank} | ind_row1: {set(xp.where(vec > atol)[0])}") - print(f"rank {rank} | ind_row2: {set(xp.where(vec > atol)[1])}") - print(f"rank {rank} | ind_row3: {set(xp.where(vec > atol)[2])}") + print(f"rank {rank} | ind_row1: {set(np.where(vec > atol)[0])}") + print(f"rank {rank} | ind_row2: {set(np.where(vec > atol)[1])}") + print(f"rank {rank} | ind_row3: {set(np.where(vec > atol)[2])}") # check if correct indices are non-zero for n, r in enumerate(row_str): - assert set(xp.where(vec > atol)[n]) == rows[n][r] + assert set(np.where(vec > atol)[n]) == rows[n][r] # Set vector back to zero vec[:] = 0.0 diff --git a/src/struphy/pic/tests/test_pic_legacy_files/accumulation.py b/src/struphy/pic/tests/test_pic_legacy_files/accumulation.py index 6bb225571..4b0cbc7ae 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/accumulation.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/accumulation.py @@ -8,11 +8,11 @@ import time -import cunumpy as xp import scipy.sparse as spa from psydac.ddm.mpi import mpi as MPI import struphy.pic.tests.test_pic_legacy_files.accumulation_kernels_3d as pic_ker_3d +from struphy.utils.arrays import xp as np # import struphy.pic.tests.test_pic_legacy_files.accumulation_kernels_2d as pic_ker_2d @@ -69,22 +69,22 @@ def __init__(self, tensor_space_FEM, domain, basis_u, mpi_comm, use_control, cv_ else: Ni = getattr(self.space, "Nbase_" + str(self.basis_u) + "form")[a] - self.vecs_loc[a] = xp.empty((Ni[0], Ni[1], Ni[2]), dtype=float) - self.vecs_glo[a] = xp.empty((Ni[0], Ni[1], Ni[2]), dtype=float) + self.vecs_loc[a] = np.empty((Ni[0], Ni[1], Ni[2]), dtype=float) + self.vecs_glo[a] = np.empty((Ni[0], Ni[1], Ni[2]), dtype=float) for b in range(3): if self.space.dim == 2: - self.blocks_loc[a][b] = xp.empty( + self.blocks_loc[a][b] = np.empty( (Ni[0], Ni[1], Ni[2], 2 * self.space.p[0] + 1, 2 * self.space.p[1] + 1, self.space.NbaseN[2]), dtype=float, ) - self.blocks_glo[a][b] = xp.empty( + self.blocks_glo[a][b] = np.empty( (Ni[0], Ni[1], Ni[2], 2 * self.space.p[0] + 1, 2 * self.space.p[1] + 1, self.space.NbaseN[2]), dtype=float, ) else: - self.blocks_loc[a][b] = xp.empty( + self.blocks_loc[a][b] = np.empty( ( Ni[0], Ni[1], @@ -95,7 +95,7 @@ def __init__(self, tensor_space_FEM, domain, basis_u, mpi_comm, use_control, cv_ ), dtype=float, ) - self.blocks_glo[a][b] = xp.empty( + self.blocks_glo[a][b] = np.empty( ( Ni[0], Ni[1], @@ -134,16 +134,16 @@ def to_sparse_step1(self): Ni = self.space.Nbase_2form[a] Nj = self.space.Nbase_2form[b] - indices = xp.indices(self.blocks_glo[a][b].shape) + indices = np.indices(self.blocks_glo[a][b].shape) row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni[:2], self.space.p[:2])] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni[:2], self.space.p[:2])] if self.space.dim 
== 2: - shift += [xp.zeros(self.space.NbaseN[2], dtype=int)] + shift += [np.zeros(self.space.NbaseN[2], dtype=int)] else: - shift += [xp.arange(Ni[2]) - self.space.p[2]] + shift += [np.arange(Ni[2]) - self.space.p[2]] col1 = (indices[3] + shift[0][:, None, None, None, None, None]) % Nj[0] col2 = (indices[4] + shift[1][None, :, None, None, None, None]) % Nj[1] @@ -159,8 +159,7 @@ def to_sparse_step1(self): # final block matrix M = spa.bmat( - [[None, M[0][1], M[0][2]], [-M[0][1].T, None, M[1][2]], [-M[0][2].T, -M[1][2].T, None]], - format="csr", + [[None, M[0][1], M[0][2]], [-M[0][1].T, None, M[1][2]], [-M[0][2].T, -M[1][2].T, None]], format="csr" ) # apply extraction operator @@ -202,16 +201,16 @@ def to_sparse_step3(self): Ni = self.space.Nbase_2form[a] Nj = self.space.Nbase_2form[b] - indices = xp.indices(self.blocks_glo[a][b].shape) + indices = np.indices(self.blocks_glo[a][b].shape) row = (Ni[1] * Ni[2] * indices[0] + Ni[2] * indices[1] + indices[2]).flatten() - shift = [xp.arange(Ni) - p for Ni, p in zip(Ni[:2], self.space.p[:2])] + shift = [np.arange(Ni) - p for Ni, p in zip(Ni[:2], self.space.p[:2])] if self.space.dim == 2: - shift += [xp.zeros(self.space.NbaseN[2], dtype=int)] + shift += [np.zeros(self.space.NbaseN[2], dtype=int)] else: - shift += [xp.arange(Ni[2]) - self.space.p[2]] + shift += [np.arange(Ni[2]) - self.space.p[2]] col1 = (indices[3] + shift[0][:, None, None, None, None, None]) % Nj[0] col2 = (indices[4] + shift[1][None, :, None, None, None, None]) % Nj[1] @@ -227,8 +226,7 @@ def to_sparse_step3(self): # final block matrix M = spa.bmat( - [[M[0][0], M[0][1], M[0][2]], [M[0][1].T, M[1][1], M[1][2]], [M[0][2].T, M[1][2].T, M[2][2]]], - format="csr", + [[M[0][0], M[0][1], M[0][2]], [M[0][1].T, M[1][1], M[1][2]], [M[0][2].T, M[1][2].T, M[2][2]]], format="csr" ) # apply extraction operator @@ -530,15 +528,15 @@ def assemble_step3(self, b2_eq, b2): # build global sparse matrix and global vector if self.basis_u == 0: return self.to_sparse_step3(), self.space.Ev_0.dot( - xp.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())), + np.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())) ) elif self.basis_u == 1: return self.to_sparse_step3(), self.space.E1_0.dot( - xp.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())), + np.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())) ) elif self.basis_u == 2: return self.to_sparse_step3(), self.space.E2_0.dot( - xp.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())), + np.concatenate((self.vecs[0].flatten(), self.vecs[1].flatten(), self.vecs[2].flatten())) ) diff --git a/src/struphy/pic/tests/test_pic_legacy_files/accumulation_kernels_3d.py b/src/struphy/pic/tests/test_pic_legacy_files/accumulation_kernels_3d.py index 349cca379..c70261023 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/accumulation_kernels_3d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/accumulation_kernels_3d.py @@ -185,49 +185,13 @@ def kernel_step1( bsp.b_d_splines_slim(t3, int(pn3), eta3, int(span3), bn3, bd3) b[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - b2_1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], b2_1 ) b[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - 
nbase_d[0], - nbase_n[1], - nbase_d[2], - b2_2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], b2_2 ) b[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - b2_3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], b2_3 ) b_prod[0, 1] = -b[2] @@ -590,49 +554,13 @@ def kernel_step3( bsp.b_d_splines_slim(t3, int(pn3), eta3, int(span3), bn3, bd3) b[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - b2_1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], b2_1 ) b[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - b2_2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], b2_2 ) b[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - b2_3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], b2_3 ) b_prod[0, 1] = -b[2] @@ -1202,14 +1130,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat11[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pn1 + 1): @@ -1221,14 +1142,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat12[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pn1 + 1): @@ -1240,14 +1154,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat13[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] # add contribution to 22 component (NDN NDN) and 23 component (NDN NND) @@ -1272,14 +1179,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat22[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pn1 + 1): @@ -1291,14 +1191,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat23[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] # add contribution to 33 component (NND NND) @@ -1323,14 +1216,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat33[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] elif basis_u == 2: @@ -1356,14 +1242,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat11[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pd1 + 1): 
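The kernel_step_ph_full hunks on both sides of this point only collapse argument lists; the accumulation they wrap is stencil storage: in mat11[i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq] the second index triplet is not a global column but the offset of the column basis function relative to the row one, shifted by the degree into [0, 2p]. A hypothetical 1D sketch of that convention (toy names, not the kernel signature):

import numpy as np

p = 2                                # spline degree
n = 6                                # number of (periodic) basis functions
mat = np.zeros((n, 2 * p + 1))       # stencil storage: one band per row

# One marker touches the p + 1 splines that are non-zero in its cell;
# span - il is the global index of the il-th of them.
span = 4
bvals = np.array([0.25, 0.5, 0.25])  # the p + 1 basis values at the marker
for il in range(p + 1):
    i = (span - il) % n              # global row index
    for jl in range(p + 1):
        # column offset jl - il, shifted by p so it lies in [0, 2p]
        mat[i, p + jl - il] += bvals[il] * bvals[jl]

# all contributions stay inside the band and sum to (sum of basis values)**2
assert (mat >= 0.0).all() and mat.sum() == bvals.sum() ** 2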
@@ -1375,14 +1254,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat12[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pd1 + 1): @@ -1394,14 +1266,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat13[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] # add contribution to 22 component (DND DND) and 23 component (DND DDN) @@ -1426,14 +1291,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat22[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] for jl1 in range(pd1 + 1): @@ -1445,14 +1303,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat23[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] # add contribution to 33 component (DDN DDN) @@ -1477,14 +1328,7 @@ def kernel_step_ph_full( for vp in range(3): for vq in range(3): mat33[ - i1, - i2, - i3, - pn1 + jl1 - il1, - pn2 + jl2 - il2, - pn3 + jl3 - il3, - vp, - vq, + i1, i2, i3, pn1 + jl1 - il1, pn2 + jl2 - il2, pn3 + jl3 - il3, vp, vq ] += bj3 * v[vp] * v[vq] # -- removed omp: #$ omp end parallel diff --git a/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d.py b/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d.py index 2e54d34dd..587b8b15f 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d.py @@ -74,53 +74,17 @@ def f( if kind_map == 0: if component == 1: value = eva_3d.evaluate_n_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cx, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cx, eta1, eta2, eta3 ) elif component == 2: value = eva_3d.evaluate_n_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cy, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cy, eta1, eta2, eta3 ) elif component == 3: value = eva_3d.evaluate_n_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cz, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cz, eta1, eta2, eta3 ) # ==== 2d spline (straight in 3rd direction) === @@ -146,7 +110,7 @@ def f( elif kind_map == 2: if component == 1: value = eva_2d.evaluate_n_n(tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2) * cos( - 2 * pi * eta3, + 2 * pi * eta3 ) if eta1 == 0.0 and cx[0, 0, 0] == cx[0, 1, 0]: @@ -160,7 +124,7 @@ def f( elif component == 3: value = eva_2d.evaluate_n_n(tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2) * sin( - 2 * pi * eta3, + 2 * pi * eta3 ) if eta1 == 0.0 and cx[0, 0, 0] == cx[0, 1, 0]: @@ -335,147 +299,39 @@ def df( if kind_map == 0: if component == 11: value = eva_3d.evaluate_diffn_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cx, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], 
pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cx, eta1, eta2, eta3 ) elif component == 12: value = eva_3d.evaluate_n_diffn_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cx, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cx, eta1, eta2, eta3 ) elif component == 13: value = eva_3d.evaluate_n_n_diffn( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cx, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cx, eta1, eta2, eta3 ) elif component == 21: value = eva_3d.evaluate_diffn_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cy, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cy, eta1, eta2, eta3 ) elif component == 22: value = eva_3d.evaluate_n_diffn_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cy, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cy, eta1, eta2, eta3 ) elif component == 23: value = eva_3d.evaluate_n_n_diffn( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cy, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cy, eta1, eta2, eta3 ) elif component == 31: value = eva_3d.evaluate_diffn_n_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cz, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cz, eta1, eta2, eta3 ) elif component == 32: value = eva_3d.evaluate_n_diffn_n( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cz, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cz, eta1, eta2, eta3 ) elif component == 33: value = eva_3d.evaluate_n_n_diffn( - tn1, - tn2, - tn3, - pn[0], - pn[1], - pn[2], - nbase_n[0], - nbase_n[1], - nbase_n[2], - cz, - eta1, - eta2, - eta3, + tn1, tn2, tn3, pn[0], pn[1], pn[2], nbase_n[0], nbase_n[1], nbase_n[2], cz, eta1, eta2, eta3 ) # ==== 2d spline (straight in 3rd direction) === @@ -513,27 +369,11 @@ def df( elif kind_map == 2: if component == 11: value = eva_2d.evaluate_diffn_n( - tn1, - tn2, - pn[0], - pn[1], - nbase_n[0], - nbase_n[1], - cx[:, :, 0], - eta1, - eta2, + tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2 ) * cos(2 * pi * eta3) elif component == 12: value = eva_2d.evaluate_n_diffn( - tn1, - tn2, - pn[0], - pn[1], - nbase_n[0], - nbase_n[1], - cx[:, :, 0], - eta1, - eta2, + tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2 ) * cos(2 * pi * eta3) if eta1 == 0.0 and cx[0, 0, 0] == cx[0, 1, 0]: @@ -557,27 +397,11 @@ def df( value = 0.0 elif component == 31: value = eva_2d.evaluate_diffn_n( - tn1, - tn2, - pn[0], - pn[1], - nbase_n[0], - nbase_n[1], - cx[:, :, 0], - eta1, - eta2, + tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2 ) * sin(2 * pi * eta3) elif component == 32: value = eva_2d.evaluate_n_diffn( - tn1, - tn2, - pn[0], - pn[1], - nbase_n[0], - nbase_n[1], - cx[:, :, 0], - eta1, - eta2, + tn1, tn2, pn[0], pn[1], nbase_n[0], nbase_n[1], cx[:, :, 0], eta1, eta2 ) * sin(2 * pi * eta3) if eta1 == 0.0 and cx[0, 0, 0] == cx[0, 1, 0]: diff --git a/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d_fast.py 
b/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d_fast.py index f87380685..fbd912b39 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d_fast.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/mappings_3d_fast.py @@ -264,51 +264,19 @@ def df_all( if mat_or_vec == 0 or mat_or_vec == 2: # sum-up non-vanishing contributions (line 1: df_11, df_12 and df_13) mat_out[0, 0] = evaluation_kernel_2d( - pn[0], - pn[1], - der1, - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], der1, b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) mat_out[0, 1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - der2, - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], der2, span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) mat_out[0, 2] = 0.0 # sum-up non-vanishing contributions (line 2: df_21, df_22 and df_23) mat_out[1, 0] = evaluation_kernel_2d( - pn[0], - pn[1], - der1, - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], der1, b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) mat_out[1, 1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - der2, - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], b1[pn[0]], der2, span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) mat_out[1, 2] = 0.0 @@ -320,26 +288,10 @@ def df_all( # evaluate mapping if mat_or_vec == 1 or mat_or_vec == 2: vec_out[0] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) vec_out[1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) vec_out[2] = lz * eta3 @@ -353,38 +305,14 @@ def df_all( if mat_or_vec == 0 or mat_or_vec == 2: # sum-up non-vanishing contributions (line 1: df_11, df_12 and df_13) mat_out[0, 0] = evaluation_kernel_2d( - pn[0], - pn[1], - der1, - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], der1, b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * cos(2 * pi * eta3) mat_out[0, 1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - der2, - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], der2, span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * cos(2 * pi * eta3) mat_out[0, 2] = ( evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * sin(2 * pi * eta3) * (-2 * pi) @@ -392,63 +320,23 @@ def df_all( # sum-up non-vanishing contributions (line 2: df_21, df_22 and df_23) mat_out[1, 0] = evaluation_kernel_2d( - pn[0], - pn[1], - der1, - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], der1, b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) mat_out[1, 1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - der2, - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], b1[pn[0]], der2, span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) mat_out[1, 2] = 0.0 # sum-up non-vanishing contributions 
(line 3: df_31, df_32 and df_33) mat_out[2, 0] = evaluation_kernel_2d( - pn[0], - pn[1], - der1, - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], der1, b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * sin(2 * pi * eta3) mat_out[2, 1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - der2, - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], der2, span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * sin(2 * pi * eta3) mat_out[2, 2] = ( evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * cos(2 * pi * eta3) * 2 @@ -458,37 +346,13 @@ def df_all( # evaluate mapping if mat_or_vec == 1 or mat_or_vec == 2: vec_out[0] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * cos(2 * pi * eta3) vec_out[1] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cy[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cy[:, :, 0] ) vec_out[2] = evaluation_kernel_2d( - pn[0], - pn[1], - b1[pn[0]], - b2[pn[1]], - span_n1, - span_n2, - nbase_n[0], - nbase_n[1], - cx[:, :, 0], + pn[0], pn[1], b1[pn[0]], b2[pn[1]], span_n1, span_n2, nbase_n[0], nbase_n[1], cx[:, :, 0] ) * sin(2 * pi * eta3) # analytical mapping @@ -496,150 +360,33 @@ def df_all( # evaluate Jacobian matrix if mat_or_vec == 0 or mat_or_vec == 2: mat_out[0, 0] = mapping.df( - eta1, - eta2, - eta3, - 11, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 11, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[0, 1] = mapping.df( - eta1, - eta2, - eta3, - 12, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 12, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[0, 2] = mapping.df( - eta1, - eta2, - eta3, - 13, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 13, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[1, 0] = mapping.df( - eta1, - eta2, - eta3, - 21, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 21, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[1, 1] = mapping.df( - eta1, - eta2, - eta3, - 22, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 22, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[1, 2] = mapping.df( - eta1, - eta2, - eta3, - 23, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 23, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[2, 0] = mapping.df( - eta1, - eta2, - eta3, - 31, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 31, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[2, 1] = mapping.df( - eta1, - eta2, - eta3, - 32, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 32, kind_map, params_map, tn1, 
tn2, tn3, pn, nbase_n, cx, cy, cz ) mat_out[2, 2] = mapping.df( - eta1, - eta2, - eta3, - 33, - kind_map, - params_map, - tn1, - tn2, - tn3, - pn, - nbase_n, - cx, - cy, - cz, + eta1, eta2, eta3, 33, kind_map, params_map, tn1, tn2, tn3, pn, nbase_n, cx, cy, cz ) # evaluate mapping diff --git a/src/struphy/pic/tests/test_pic_legacy_files/pusher.py b/src/struphy/pic/tests/test_pic_legacy_files/pusher.py index 518e19ee0..6bdb74642 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/pusher.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/pusher.py @@ -1,8 +1,7 @@ -import cunumpy as xp - import struphy.pic.tests.test_pic_legacy_files.pusher_pos as push_pos import struphy.pic.tests.test_pic_legacy_files.pusher_vel_2d as push_vel_2d import struphy.pic.tests.test_pic_legacy_files.pusher_vel_3d as push_vel_3d +from struphy.utils.arrays import xp as np class Pusher: diff --git a/src/struphy/pic/tests/test_pic_legacy_files/pusher_pos.py b/src/struphy/pic/tests/test_pic_legacy_files/pusher_pos.py index 81b5e1e53..78631440e 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/pusher_pos.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/pusher_pos.py @@ -532,52 +532,13 @@ def pusher_step4_pcart( # compute old pseudo-cartesian coordinates fx_pseudo[0] = mapping.f( - eta[0], - eta[1], - eta[2], - 1, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 1, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) fx_pseudo[1] = mapping.f( - eta[0], - eta[1], - eta[2], - 2, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 2, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) fx_pseudo[2] = mapping.f( - eta[0], - eta[1], - eta[2], - 3, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 3, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) # evaluate old Jacobian matrix of mapping F @@ -627,150 +588,33 @@ def pusher_step4_pcart( # evaluate old Jacobian matrix of mapping F_pseudo df_pseudo_old[0, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 11, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 11, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[0, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 12, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 12, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[0, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 13, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 13, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[1, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 21, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 21, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[1, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 22, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 22, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[1, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 23, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + 
eta[0], eta[1], eta[2], 23, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[2, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 31, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 31, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[2, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 32, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 32, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo_old[2, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 33, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 33, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) while True: @@ -843,150 +687,33 @@ def pusher_step4_pcart( # evaluate Jacobian matrix of mapping F_pseudo df_pseudo[0, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 11, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 11, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[0, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 12, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 12, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[0, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 13, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 13, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 21, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 21, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 22, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 22, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 23, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 23, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 31, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 31, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 32, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 32, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 33, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 33, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) # compute df_pseudo*df_inv*v @@ -1057,150 +784,33 @@ def pusher_step4_pcart( # evaluate Jacobian matrix of mapping F_pseudo df_pseudo[0, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 11, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 11, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, 
nbasef, cx, cy, cz ) df_pseudo[0, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 12, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 12, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[0, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 13, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 13, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 21, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 21, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 22, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 22, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 23, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 23, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 31, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 31, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 32, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 32, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 33, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 33, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) # compute df_pseudo*df_inv*v @@ -1271,150 +881,33 @@ def pusher_step4_pcart( # evaluate Jacobian matrix of mapping F_pseudo df_pseudo[0, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 11, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 11, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[0, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 12, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 12, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[0, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 13, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 13, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 21, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 21, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 22, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 22, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[1, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 23, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], 
eta[2], 23, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 0] = mapping.df( - eta[0], - eta[1], - eta[2], - 31, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 31, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 1] = mapping.df( - eta[0], - eta[1], - eta[2], - 32, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 32, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) df_pseudo[2, 2] = mapping.df( - eta[0], - eta[1], - eta[2], - 33, - map_pseudo, - params_pseudo, - tf1, - tf2, - tf3, - pf, - nbasef, - cx, - cy, - cz, + eta[0], eta[1], eta[2], 33, map_pseudo, params_pseudo, tf1, tf2, tf3, pf, nbasef, cx, cy, cz ) # compute df_pseudo*df_inv*v @@ -1881,98 +1374,26 @@ def pusher_rk4_pc_full( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k1_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k1_u[:] = u / det_df @@ -2070,98 +1491,26 @@ def pusher_rk4_pc_full( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k2_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - 
span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k2_u[:] = u / det_df @@ -2259,98 +1608,26 @@ def pusher_rk4_pc_full( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k3_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k3_u[:] = u / det_df @@ -2445,98 +1722,26 @@ def pusher_rk4_pc_full( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k4_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], 
nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k4_u[:] = u / det_df @@ -2787,98 +1992,26 @@ def pusher_rk4_pc_perp( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k1_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k1_u[:] = u / det_df @@ -2975,98 +2108,26 @@ def pusher_rk4_pc_perp( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k2_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - 
nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k2_u[:] = u / det_df @@ -3162,98 +2223,26 @@ def pusher_rk4_pc_perp( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k3_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k3_u[:] = u / det_df @@ -3349,98 +2338,26 @@ def pusher_rk4_pc_perp( # velocity field if basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(Ginv, u, k4_u) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = 
eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) k4_u[:] = u / det_df diff --git a/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_2d.py b/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_2d.py index 0fcc29751..43e320311 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_2d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_2d.py @@ -248,43 +248,19 @@ def pusher_step3( for i in range(nbase_n[2]): u[0] += ( eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - u1[:, :, i], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], u1[:, :, i] ) * cs[i] ) u[1] += ( eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - u2[:, :, i], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], u2[:, :, i] ) * cs[i] ) u[2] += ( eva2.evaluation_kernel_2d( - pn1, - pn2, - bn1, - bn2, - span1 - 0, - span2 - 0, - nbase_n[0], - nbase_n[1], - u3[:, :, i], + pn1, pn2, bn1, bn2, span1 - 0, span2 - 0, nbase_n[0], nbase_n[1], u3[:, :, i] ) * cs[i] ) @@ -298,43 +274,19 @@ def pusher_step3( for i in range(nbase_n[2]): u[0] += ( eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - u1[:, :, i], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], u1[:, :, i] ) * cs[i] ) u[1] += ( eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - u2[:, :, i], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], u2[:, :, i] ) * cs[i] ) u[2] += ( eva2.evaluation_kernel_2d( - pd1, - pd2, - bd1, - bd2, - span1 - 1, - span2 - 1, - nbase_d[0], - nbase_d[1], - u3[:, :, i], + pd1, pd2, bd1, bd2, span1 - 1, span2 - 1, nbase_d[0], nbase_d[1], u3[:, :, i] ) * cs[i] ) @@ -347,80 +299,32 @@ def pusher_step3( # equilibrium magnetic field (2-form) b[0] = eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - b_eq_1[:, :, 0], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], b_eq_1[:, :, 0] ) b[1] = eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - b_eq_2[:, :, 0], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], b_eq_2[:, :, 0] ) b[2] = eva2.evaluation_kernel_2d( - pd1, - pd2, - bd1, - bd2, - span1 - 1, - span2 - 1, - nbase_d[0], - nbase_d[1], - b_eq_3[:, :, 0], + pd1, pd2, bd1, bd2, span1 - 1, span2 - 1, nbase_d[0], nbase_d[1], b_eq_3[:, :, 0] ) # perturbed magnetic field (2-form) for i in range(nbase_n[2]): b[0] += ( eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - b_p_1[:, :, i], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], b_p_1[:, :, i] ) * cs[i] ) b[1] += ( eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - b_p_2[:, :, i], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], b_p_2[:, :, i] ) * cs[i] ) b[2] += ( eva2.evaluation_kernel_2d( - pd1, - pd2, - bd1, - bd2, - span1 - 1, - span2 - 1, - nbase_d[0], - nbase_d[1], - b_p_3[:, :, i], + pd1, pd2, bd1, bd2, span1 - 1, span2 - 1, nbase_d[0], nbase_d[1], b_p_3[:, 
:, i] ) * cs[i] ) @@ -434,26 +338,10 @@ def pusher_step3( # gradient of absolute value of magnetic field (1-form) b_grad[0] = eva2.evaluation_kernel_2d( - pn1, - pn2, - der1, - bn2, - span1, - span2, - nbase_n[0], - nbase_n[1], - b_norm[:, :, 0], + pn1, pn2, der1, bn2, span1, span2, nbase_n[0], nbase_n[1], b_norm[:, :, 0] ) b_grad[1] = eva2.evaluation_kernel_2d( - pn1, - pn2, - bn1, - der2, - span1, - span2, - nbase_n[0], - nbase_n[1], - b_norm[:, :, 0], + pn1, pn2, bn1, der2, span1, span2, nbase_n[0], nbase_n[1], b_norm[:, :, 0] ) b_grad[2] = 0.0 @@ -674,80 +562,32 @@ def pusher_step5( # equilibrium magnetic field (2-form) b[0] = eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - b_eq_1[:, :, 0], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], b_eq_1[:, :, 0] ) b[1] = eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - b_eq_2[:, :, 0], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], b_eq_2[:, :, 0] ) b[2] = eva2.evaluation_kernel_2d( - pd1, - pd2, - bd1, - bd2, - span1 - 1, - span2 - 1, - nbase_d[0], - nbase_d[1], - b_eq_3[:, :, 0], + pd1, pd2, bd1, bd2, span1 - 1, span2 - 1, nbase_d[0], nbase_d[1], b_eq_3[:, :, 0] ) # perturbed magnetic field (2-form) for i in range(nbase_n[2]): b[0] += ( eva2.evaluation_kernel_2d( - pn1, - pd2, - bn1, - bd2, - span1 - 0, - span2 - 1, - nbase_n[0], - nbase_d[1], - b_p_1[:, :, i], + pn1, pd2, bn1, bd2, span1 - 0, span2 - 1, nbase_n[0], nbase_d[1], b_p_1[:, :, i] ) * cs[i] ) b[1] += ( eva2.evaluation_kernel_2d( - pd1, - pn2, - bd1, - bn2, - span1 - 1, - span2 - 0, - nbase_d[0], - nbase_n[1], - b_p_2[:, :, i], + pd1, pn2, bd1, bn2, span1 - 1, span2 - 0, nbase_d[0], nbase_n[1], b_p_2[:, :, i] ) * cs[i] ) b[2] += ( eva2.evaluation_kernel_2d( - pd1, - pd2, - bd1, - bd2, - span1 - 1, - span2 - 1, - nbase_d[0], - nbase_d[1], - b_p_3[:, :, i], + pd1, pd2, bd1, bd2, span1 - 1, span2 - 1, nbase_d[0], nbase_d[1], b_p_3[:, :, i] ) * cs[i] ) diff --git a/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_3d.py b/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_3d.py index cd3884209..4eabb26dd 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_3d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/pusher_vel_3d.py @@ -227,49 +227,13 @@ def pusher_step3( # velocity field (0-form, push-forward with df) if basis_u == 0: u[0] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - u1, + pn1, pn2, pn3, bn1, bn2, bn3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - u2, + pn1, pn2, pn3, bn1, bn2, bn3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - u3, + pn1, pn2, pn3, bn1, bn2, bn3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], u3 ) linalg.matrix_vector(df, u, u_cart) @@ -277,49 +241,13 @@ def pusher_step3( # velocity field (1-form, push forward with df^(-T)) elif basis_u == 1: u[0] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span1 - 1, - span2, - span3, - nbase_d[0], - nbase_n[1], - nbase_n[2], - u1, + pd1, pn2, pn3, bd1, bn2, bn3, span1 - 1, span2, 
span3, nbase_d[0], nbase_n[1], nbase_n[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span1, - span2 - 1, - span3, - nbase_n[0], - nbase_d[1], - nbase_n[2], - u2, + pn1, pd2, pn3, bn1, bd2, bn3, span1, span2 - 1, span3, nbase_n[0], nbase_d[1], nbase_n[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span1, - span2, - span3 - 1, - nbase_n[0], - nbase_n[1], - nbase_d[2], - u3, + pn1, pn2, pd3, bn1, bn2, bd3, span1, span2, span3 - 1, nbase_n[0], nbase_n[1], nbase_d[2], u3 ) linalg.matrix_vector(dfinv_t, u, u_cart) @@ -327,49 +255,13 @@ def pusher_step3( # velocity field (2-form, push forward with df/|det df|) elif basis_u == 2: u[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - u1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], u1 ) u[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - u2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], u2 ) u[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - u3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], u3 ) linalg.matrix_vector(df, u, u_cart) @@ -380,49 +272,13 @@ def pusher_step3( # magnetic field (2-form) b[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - b2_1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], b2_1 ) b[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - b2_2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], b2_2 ) b[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - b2_3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], b2_3 ) # push-forward to physical domain @@ -434,49 +290,13 @@ def pusher_step3( # gradient of absolute value of magnetic field (1-form) b_grad[0] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - der1, - bn2, - bn3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - b0, + pn1, pn2, pn3, der1, bn2, bn3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], b0 ) b_grad[1] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - der2, - bn3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - b0, + pn1, pn2, pn3, bn1, der2, bn3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], b0 ) b_grad[2] = eva3.evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - der3, - span1, - span2, - span3, - nbase_n[0], - nbase_n[1], - nbase_n[2], - b0, + pn1, pn2, pn3, bn1, bn2, der3, span1, span2, span3, nbase_n[0], nbase_n[1], nbase_n[2], b0 ) # push-forward to physical domain @@ -717,49 +537,13 @@ def pusher_step5_old( # magnetic field (2-form) b[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - b2_1, + pn1, pd2, 
pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], b2_1 ) b[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - b2_2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], b2_2 ) b[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - b2_3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], b2_3 ) b_prod[0, 1] = -b[2] @@ -1010,49 +794,13 @@ def pusher_step5( # magnetic field (2-form) b[0] = eva3.evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span1, - span2 - 1, - span3 - 1, - nbase_n[0], - nbase_d[1], - nbase_d[2], - b2_1, + pn1, pd2, pd3, bn1, bd2, bd3, span1, span2 - 1, span3 - 1, nbase_n[0], nbase_d[1], nbase_d[2], b2_1 ) b[1] = eva3.evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span1 - 1, - span2, - span3 - 1, - nbase_d[0], - nbase_n[1], - nbase_d[2], - b2_2, + pd1, pn2, pd3, bd1, bn2, bd3, span1 - 1, span2, span3 - 1, nbase_d[0], nbase_n[1], nbase_d[2], b2_2 ) b[2] = eva3.evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, - bd2, - bn3, - span1 - 1, - span2 - 1, - span3, - nbase_d[0], - nbase_d[1], - nbase_n[2], - b2_3, + pd1, pd2, pn3, bd1, bd2, bn3, span1 - 1, span2 - 1, span3, nbase_d[0], nbase_d[1], nbase_n[2], b2_3 ) # push-forward to physical domain diff --git a/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_2d.py b/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_2d.py index fdd4485b5..ba32b93bf 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_2d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_2d.py @@ -400,7 +400,7 @@ def evaluate_tensor_product( Returns: -------- - values: double[:, :] values of spline at points from xp.meshgrid(eta1, eta2, indexing='ij'). + values: double[:, :] values of spline at points from np.meshgrid(eta1, eta2, indexing='ij'). 
""" for i1 in range(len(eta1)): diff --git a/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_3d.py b/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_3d.py index 7923b3966..28e2b5d9c 100644 --- a/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_3d.py +++ b/src/struphy/pic/tests/test_pic_legacy_files/spline_evaluation_3d.py @@ -127,19 +127,7 @@ def evaluate_n_n_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span_n1, - span_n2, - span_n3, - nbase_n1, - nbase_n2, - nbase_n3, - coeff, + pn1, pn2, pn3, bn1, bn2, bn3, span_n1, span_n2, span_n3, nbase_n1, nbase_n2, nbase_n3, coeff ) return value @@ -201,19 +189,7 @@ def evaluate_diffn_n_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span_n1, - span_n2, - span_n3, - nbase_n1, - nbase_n2, - nbase_n3, - coeff, + pn1, pn2, pn3, bn1, bn2, bn3, span_n1, span_n2, span_n3, nbase_n1, nbase_n2, nbase_n3, coeff ) return value @@ -275,19 +251,7 @@ def evaluate_n_diffn_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span_n1, - span_n2, - span_n3, - nbase_n1, - nbase_n2, - nbase_n3, - coeff, + pn1, pn2, pn3, bn1, bn2, bn3, span_n1, span_n2, span_n3, nbase_n1, nbase_n2, nbase_n3, coeff ) return value @@ -349,19 +313,7 @@ def evaluate_n_n_diffn( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pn2, - pn3, - bn1, - bn2, - bn3, - span_n1, - span_n2, - span_n3, - nbase_n1, - nbase_n2, - nbase_n3, - coeff, + pn1, pn2, pn3, bn1, bn2, bn3, span_n1, span_n2, span_n3, nbase_n1, nbase_n2, nbase_n3, coeff ) return value @@ -425,19 +377,7 @@ def evaluate_d_n_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pd1, - pn2, - pn3, - bd1, - bn2, - bn3, - span_d1, - span_n2, - span_n3, - nbase_d1, - nbase_n2, - nbase_n3, - coeff, + pd1, pn2, pn3, bd1, bn2, bn3, span_d1, span_n2, span_n3, nbase_d1, nbase_n2, nbase_n3, coeff ) return value @@ -501,19 +441,7 @@ def evaluate_n_d_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pd2, - pn3, - bn1, - bd2, - bn3, - span_n1, - span_d2, - span_n3, - nbase_n1, - nbase_d2, - nbase_n3, - coeff, + pn1, pd2, pn3, bn1, bd2, bn3, span_n1, span_d2, span_n3, nbase_n1, nbase_d2, nbase_n3, coeff ) return value @@ -577,19 +505,7 @@ def evaluate_n_n_d( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pn2, - pd3, - bn1, - bn2, - bd3, - span_n1, - span_n2, - span_d3, - nbase_n1, - nbase_n2, - nbase_d3, - coeff, + pn1, pn2, pd3, bn1, bn2, bd3, span_n1, span_n2, span_d3, nbase_n1, nbase_n2, nbase_d3, coeff ) return value @@ -654,19 +570,7 @@ def evaluate_n_d_d( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pn1, - pd2, - pd3, - bn1, - bd2, - bd3, - span_n1, - span_d2, - span_d3, - nbase_n1, - nbase_d2, - nbase_d3, - coeff, + pn1, pd2, pd3, bn1, bd2, bd3, span_n1, span_d2, span_d3, nbase_n1, nbase_d2, nbase_d3, coeff ) return value @@ -731,19 +635,7 @@ def evaluate_d_n_d( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pd1, - pn2, - pd3, - bd1, - bn2, - bd3, - span_d1, - span_n2, - span_d3, - nbase_d1, - nbase_n2, - nbase_d3, - coeff, + pd1, pn2, pd3, bd1, bn2, bd3, span_d1, span_n2, span_d3, nbase_d1, nbase_n2, nbase_d3, coeff ) return value @@ -808,19 +700,7 @@ def evaluate_d_d_n( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pd1, - pd2, - pn3, - bd1, 
- bd2, - bn3, - span_d1, - span_d2, - span_n3, - nbase_d1, - nbase_d2, - nbase_n3, - coeff, + pd1, pd2, pn3, bd1, bd2, bn3, span_d1, span_d2, span_n3, nbase_d1, nbase_d2, nbase_n3, coeff ) return value @@ -886,19 +766,7 @@ def evaluate_d_d_d( # sum up non-vanishing contributions value = evaluation_kernel_3d( - pd1, - pd2, - pd3, - bd1, - bd2, - bd3, - span_d1, - span_d2, - span_d3, - nbase_d1, - nbase_d2, - nbase_d3, - coeff, + pd1, pd2, pd3, bd1, bd2, bd3, span_d1, span_d2, span_d3, nbase_d1, nbase_d2, nbase_d3, coeff ) return value @@ -938,7 +806,7 @@ def evaluate_tensor_product( Returns: -------- values: double[:, :, :] values of spline at points from - xp.meshgrid(eta1, eta2, eta3, indexing='ij'). + np.meshgrid(eta1, eta2, eta3, indexing='ij'). """ for i1 in range(len(eta1)): @@ -947,137 +815,41 @@ def evaluate_tensor_product( # V0 - space if kind == 0: values[i1, i2, i3] = evaluate_n_n_n( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) # V1 - space elif kind == 11: values[i1, i2, i3] = evaluate_d_n_n( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 12: values[i1, i2, i3] = evaluate_n_d_n( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 13: values[i1, i2, i3] = evaluate_n_n_d( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) # V2 - space elif kind == 21: values[i1, i2, i3] = evaluate_n_d_d( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 22: values[i1, i2, i3] = evaluate_d_n_d( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) elif kind == 23: values[i1, i2, i3] = evaluate_d_d_n( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) # V3 - space elif kind == 3: values[i1, i2, i3] = evaluate_d_d_d( - t1, - t2, - t3, - p1, - p2, - p3, - nbase_1, - nbase_2, - nbase_3, - coeff, - eta1[i1], - eta2[i2], - eta3[i3], + t1, t2, t3, p1, p2, p3, nbase_1, nbase_2, nbase_3, coeff, eta1[i1], eta2[i2], eta3[i3] ) diff --git a/src/struphy/pic/tests/test_pushers.py b/src/struphy/pic/tests/test_pushers.py index 321ab9aba..9dc56f127 100644 --- a/src/struphy/pic/tests/test_pushers.py +++ b/src/struphy/pic/tests/test_pushers.py @@ -6,8 +6,7 @@ @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) 
@pytest.mark.parametrize( "mapping", @@ -24,7 +23,6 @@ ], ) def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -35,7 +33,7 @@ def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -61,12 +59,14 @@ def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + loading_params=loader_params, ) particles.draw_markers() @@ -126,7 +126,7 @@ def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -136,14 +136,13 @@ def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): pusher_psy(dt) # compare if markers are the same AFTER push - assert xp.allclose(particles.markers[:, :6], markers_str.T[:, :6]) + assert np.allclose(particles.markers[:, :6], markers_str.T[:, :6]) @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -160,7 +159,6 @@ def test_push_vxb_analytic(Nel, p, spl_kind, mapping, show_plots=False): ], ) def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -171,7 +169,7 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -197,12 +195,14 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + 
loading_params=loader_params, ) particles.draw_markers() @@ -252,8 +252,8 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): basis_u=2, bc_pos=0, ) - mu0_str = xp.zeros(markers_str.shape[1], dtype=float) - pow_str = xp.zeros(markers_str.shape[1], dtype=float) + mu0_str = np.zeros(markers_str.shape[1], dtype=float) + pow_str = np.zeros(markers_str.shape[1], dtype=float) pusher_psy = Pusher_psy( particles, @@ -273,7 +273,7 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -283,14 +283,13 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): pusher_psy(dt) # compare if markers are the same AFTER push - assert xp.allclose(particles.markers[:, :6], markers_str.T[:, :6]) + assert np.allclose(particles.markers[:, :6], markers_str.T[:, :6]) @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -307,7 +306,6 @@ def test_push_bxu_Hdiv(Nel, p, spl_kind, mapping, show_plots=False): ], ) def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -318,7 +316,7 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -344,12 +342,14 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + loading_params=loader_params, ) particles.draw_markers() @@ -399,8 +399,8 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): basis_u=1, bc_pos=0, ) - mu0_str = xp.zeros(markers_str.shape[1], dtype=float) - pow_str = xp.zeros(markers_str.shape[1], dtype=float) + mu0_str = np.zeros(markers_str.shape[1], dtype=float) + pow_str = np.zeros(markers_str.shape[1], dtype=float) pusher_psy = Pusher_psy( particles, @@ -420,7 +420,7 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -430,14 +430,13 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): pusher_psy(dt) # compare if markers are the same AFTER push - assert xp.allclose(particles.markers[:, :6], 
markers_str.T[:, :6]) + assert np.allclose(particles.markers[:, :6], markers_str.T[:, :6]) @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -454,7 +453,6 @@ def test_push_bxu_Hcurl(Nel, p, spl_kind, mapping, show_plots=False): ], ) def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -465,7 +463,7 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -491,12 +489,14 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + loading_params=loader_params, ) particles.draw_markers() @@ -546,8 +546,8 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): basis_u=0, bc_pos=0, ) - mu0_str = xp.zeros(markers_str.shape[1], dtype=float) - pow_str = xp.zeros(markers_str.shape[1], dtype=float) + mu0_str = np.zeros(markers_str.shape[1], dtype=float) + pow_str = np.zeros(markers_str.shape[1], dtype=float) pusher_psy = Pusher_psy( particles, @@ -567,7 +567,7 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -577,14 +577,13 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): pusher_psy(dt) # compare if markers are the same AFTER push - assert xp.allclose(particles.markers[:, :6], markers_str.T[:, :6]) + assert np.allclose(particles.markers[:, :6], markers_str.T[:, :6]) @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -601,7 +600,6 @@ def test_push_bxu_H1vec(Nel, p, spl_kind, mapping, show_plots=False): ], ) def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -612,7 +610,7 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing 
import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -638,12 +636,14 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + loading_params=loader_params, ) particles.draw_markers() @@ -693,8 +693,8 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): basis_u=2, bc_pos=0, ) - mu0_str = xp.random.rand(markers_str.shape[1]) - pow_str = xp.zeros(markers_str.shape[1], dtype=float) + mu0_str = np.random.rand(markers_str.shape[1]) + pow_str = np.zeros(markers_str.shape[1], dtype=float) pusher_psy = Pusher_psy( particles, @@ -716,7 +716,7 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -726,14 +726,13 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): pusher_psy(dt) # compare if markers are the same AFTER push - assert xp.allclose(particles.markers[:, :6], markers_str.T[:, :6]) + assert np.allclose(particles.markers[:, :6], markers_str.T[:, :6]) @pytest.mark.parametrize("Nel", [[8, 9, 5], [7, 8, 9]]) @pytest.mark.parametrize("p", [[2, 3, 1], [1, 2, 3]]) @pytest.mark.parametrize( - "spl_kind", - [[False, True, True], [True, False, True], [False, False, True], [True, True, True]], + "spl_kind", [[False, True, True], [True, False, True], [False, False, True], [True, True, True]] ) @pytest.mark.parametrize( "mapping", @@ -750,7 +749,6 @@ def test_push_bxu_Hdiv_pauli(Nel, p, spl_kind, mapping, show_plots=False): ], ) def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -762,7 +760,7 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): from struphy.pic.pushing import pusher_kernels from struphy.pic.pushing.pusher import Pusher as Pusher_psy from struphy.pic.tests.test_pic_legacy_files.pusher import Pusher as Pusher_str - from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -789,12 +787,14 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): # particle loading and sorting seed = 1234 - loading_params = LoadingParameters(ppc=2, seed=seed, moments=(0.0, 0.0, 0.0, 1.0, 1.0, 1.0), spatial="uniform") + loader_params = {"seed": seed, "moments": [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], "spatial": "uniform"} particles = Particles6D( comm_world=comm, + ppc=2, domain_decomp=domain_decomp, - loading_params=loading_params, + bc=["periodic", "periodic", "periodic"], + loading_params=loader_params, ) 
particles.draw_markers() @@ -836,8 +836,8 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): butcher = ButcherTableau("rk4") # temp fix due to refactoring of ButcherTableau: - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher._a) + [0.0]) + butcher._a = np.diag(butcher.a, k=-1) + butcher._a = np.array(list(butcher._a) + [0.0]) pusher_psy = Pusher_psy( particles, @@ -849,7 +849,7 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): ) # compare if markers are the same BEFORE push - assert xp.allclose(particles.markers, markers_str.T) + assert np.allclose(particles.markers, markers_str.T) # push markers dt = 0.1 @@ -857,12 +857,12 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): pusher_str.push_step4(markers_str, dt) pusher_psy(dt) - n_mks_load = xp.zeros(size, dtype=int) + n_mks_load = np.zeros(size, dtype=int) - comm.Allgather(xp.array(xp.shape(particles.markers)[0]), n_mks_load) + comm.Allgather(np.array(np.shape(particles.markers)[0]), n_mks_load) - sendcounts = xp.zeros(size, dtype=int) - displacements = xp.zeros(size, dtype=int) + sendcounts = np.zeros(size, dtype=int) + displacements = np.zeros(size, dtype=int) accum_sendcounts = 0.0 for i in range(size): @@ -870,27 +870,23 @@ def test_push_eta_rk4(Nel, p, spl_kind, mapping, show_plots=False): displacements[i] = accum_sendcounts accum_sendcounts += sendcounts[i] - all_particles_psy = xp.zeros((int(accum_sendcounts) * 3,), dtype=float) - all_particles_str = xp.zeros((int(accum_sendcounts) * 3,), dtype=float) + all_particles_psy = np.zeros((int(accum_sendcounts) * 3,), dtype=float) + all_particles_str = np.zeros((int(accum_sendcounts) * 3,), dtype=float) comm.Barrier() - comm.Allgatherv(xp.array(particles.markers[:, :3]), [all_particles_psy, sendcounts, displacements, MPI.DOUBLE]) - comm.Allgatherv(xp.array(markers_str.T[:, :3]), [all_particles_str, sendcounts, displacements, MPI.DOUBLE]) + comm.Allgatherv(np.array(particles.markers[:, :3]), [all_particles_psy, sendcounts, displacements, MPI.DOUBLE]) + comm.Allgatherv(np.array(markers_str.T[:, :3]), [all_particles_str, sendcounts, displacements, MPI.DOUBLE]) comm.Barrier() - unique_psy = xp.unique(all_particles_psy) - unique_str = xp.unique(all_particles_str) + unique_psy = np.unique(all_particles_psy) + unique_str = np.unique(all_particles_str) - assert xp.allclose(unique_psy, unique_str) + assert np.allclose(unique_psy, unique_str) if __name__ == "__main__": test_push_vxb_analytic( - [8, 9, 5], - [4, 2, 3], - [False, True, True], - ["Colella", {"Lx": 2.0, "Ly": 2.0, "alpha": 0.1, "Lz": 4.0}], - False, + [8, 9, 5], [4, 2, 3], [False, True, True], ["Colella", {"Lx": 2.0, "Ly": 2.0, "alpha": 0.1, "Lz": 4.0}], False ) # test_push_bxu_Hdiv([8, 9, 5], [4, 2, 3], [False, True, True], ['Colella', { # 'Lx': 2., 'Ly': 2., 'alpha': 0.1, 'Lz': 4.}], False) diff --git a/src/struphy/pic/tests/test_sorting.py b/src/struphy/pic/tests/test_sorting.py index 0daf8f4c9..48b7cad6f 100644 --- a/src/struphy/pic/tests/test_sorting.py +++ b/src/struphy/pic/tests/test_sorting.py @@ -1,65 +1,24 @@ from time import time -import cunumpy as xp import pytest from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham from struphy.geometry import domains from struphy.pic.particles import Particles6D -from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters +from struphy.utils.arrays import xp as np @pytest.mark.parametrize("nx", [8, 70]) @pytest.mark.parametrize("ny", [16, 
80]) @pytest.mark.parametrize("nz", [32, 90]) @pytest.mark.parametrize("algo", ["fortran_ordering", "c_ordering"]) -def test_flattening_1(nx, ny, nz, algo): +def test_flattening(nx, ny, nz, algo): from struphy.pic.sorting_kernels import flatten_index, unflatten_index - n1s = xp.array(xp.random.rand(10) * (nx + 1), dtype=int) - n2s = xp.array(xp.random.rand(10) * (ny + 1), dtype=int) - n3s = xp.array(xp.random.rand(10) * (nz + 1), dtype=int) - for n1 in n1s: - for n2 in n2s: - for n3 in n3s: - n_glob = flatten_index(int(n1), int(n2), int(n3), nx, ny, nz, algo) - n1n, n2n, n3n = unflatten_index(n_glob, nx, ny, nz, algo) - assert n1n == n1 - assert n2n == n2 - assert n3n == n3 - - -@pytest.mark.parametrize("nx", [8, 70]) -@pytest.mark.parametrize("ny", [16, 80]) -@pytest.mark.parametrize("nz", [32, 90]) -@pytest.mark.parametrize("algo", ["fortran_ordering", "c_ordering"]) -def test_flattening_2(nx, ny, nz, algo): - from struphy.pic.sorting_kernels import flatten_index, unflatten_index - - n1s = xp.array(xp.random.rand(10) * (nx + 1), dtype=int) - n2s = xp.array(xp.random.rand(10) * (ny + 1), dtype=int) - n3s = xp.array(xp.random.rand(10) * (nz + 1), dtype=int) - for n1 in n1s: - for n2 in n2s: - for n3 in n3s: - n_glob = flatten_index(int(n1), int(n2), int(n3), nx, ny, nz, algo) - n1n, n2n, n3n = unflatten_index(n_glob, nx, ny, nz, algo) - assert n1n == n1 - assert n2n == n2 - assert n3n == n3 - - -@pytest.mark.parametrize("nx", [8, 70]) -@pytest.mark.parametrize("ny", [16, 80]) -@pytest.mark.parametrize("nz", [32, 90]) -@pytest.mark.parametrize("algo", ["fortran_ordering", "c_ordering"]) -def test_flattening_3(nx, ny, nz, algo): - from struphy.pic.sorting_kernels import flatten_index, unflatten_index - - n1s = xp.array(xp.random.rand(10) * (nx + 1), dtype=int) - n2s = xp.array(xp.random.rand(10) * (ny + 1), dtype=int) - n3s = xp.array(xp.random.rand(10) * (nz + 1), dtype=int) + n1s = np.array(np.random.rand(10) * (nx + 1), dtype=int) + n2s = np.array(np.random.rand(10) * (ny + 1), dtype=int) + n3s = np.array(np.random.rand(10) * (nz + 1), dtype=int) for n1 in n1s: for n2 in n2s: for n3 in n3s: @@ -73,8 +32,7 @@ def test_flattening_3(nx, ny, nz, algo): @pytest.mark.parametrize("Nel", [[8, 9, 10]]) @pytest.mark.parametrize("p", [[2, 3, 4]]) @pytest.mark.parametrize( - "spl_kind", - [[False, False, True], [False, True, False], [True, False, True], [True, True, False]], + "spl_kind", [[False, False, True], [False, True, False], [True, False, True], [True, True, False]] ) @pytest.mark.parametrize( "mapping", @@ -111,11 +69,13 @@ def test_sorting(Nel, p, spl_kind, mapping, Np, verbose=False): nprocs = derham.domain_decomposition.nprocs domain_decomp = (domain_array, nprocs) - loading_params = LoadingParameters(Np=Np, seed=1607, moments=(0.0, 0.0, 0.0, 1.0, 2.0, 3.0), spatial="uniform") + loading_params = {"seed": 1607, "moments": [0.0, 0.0, 0.0, 1.0, 2.0, 3.0], "spatial": "uniform"} boxes_per_dim = (3, 3, 6) particles = Particles6D( comm_world=mpi_comm, + Np=Np, + bc=["periodic", "periodic", "periodic"], loading_params=loading_params, domain_decomp=domain_decomp, boxes_per_dim=boxes_per_dim, @@ -136,7 +96,7 @@ def test_sorting(Nel, p, spl_kind, mapping, Np, verbose=False): if __name__ == "__main__": - test_flattening_1(8, 8, 8, "c_orderwding") + test_flattening(8, 8, 8, "c_ordering") # test_sorting( # [8, 9, 10], # [2, 3, 4], diff --git a/src/struphy/pic/tests/test_sph.py b/src/struphy/pic/tests/test_sph.py index 294e7f9dc..889941b88 100644 --- a/src/struphy/pic/tests/test_sph.py +++
b/src/struphy/pic/tests/test_sph.py @@ -1,14 +1,11 @@ -import cunumpy as xp import pytest from matplotlib import pyplot as plt from psydac.ddm.mpi import MockComm from psydac.ddm.mpi import mpi as MPI -from struphy.fields_background.equils import ConstantVelocity from struphy.geometry import domains -from struphy.initial import perturbations from struphy.pic.particles import ParticlesSPH -from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters +from struphy.utils.arrays import xp as np @pytest.mark.parametrize("boxes_per_dim", [(24, 1, 1)]) @@ -40,48 +37,56 @@ def test_sph_evaluation_1d( domain = domain_class(**dom_params) if tesselation: + loading = "tesselation" + loading_params = {"n_quad": 1} if kernel == "trigonometric_1d" and derivative == 1: ppb = 100 else: ppb = 4 - loading_params = LoadingParameters(ppb=ppb, seed=1607, loading="tesselation") else: + loading = "pseudo_random" + loading_params = {"seed": 223} if derivative == 0: ppb = 1000 else: ppb = 20000 - loading_params = LoadingParameters(ppb=ppb, seed=223) # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} - pert = {"n": perturbations.ModesCos(ls=(1,), amps=(1e-0,))} + mode_params = {"given_in_basis": "0", "ls": [1], "amps": [1e-0]} + modes = {"ModesCos": mode_params} + pert_params = {"n": modes} if derivative == 0: - fun_exact = lambda e1, e2, e3: 1.5 + xp.cos(2 * xp.pi * e1) + fun_exact = lambda e1, e2, e3: 1.5 + np.cos(2 * np.pi * e1) else: - fun_exact = lambda e1, e2, e3: -2 * xp.pi * xp.sin(2 * xp.pi * e1) + fun_exact = lambda e1, e2, e3: -2 * np.pi * np.sin(2 * np.pi * e1) - boundary_params = BoundaryParameters(bc_sph=(bc_x, "periodic", "periodic")) + # boundary conditions + bc_sph = [bc_x, "periodic", "periodic"] + + # eval points + eta1 = np.linspace(0, 1.0, eval_pts) + eta2 = np.array([0.0]) + eta3 = np.array([0.0]) + # particles object particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=bc_sph, bufsize=1.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, + verbose=False, ) - # eval points - eta1 = xp.linspace(0, 1.0, eval_pts) - eta2 = xp.array([0.0]) - eta3 = xp.array([0.0]) - particles.draw_markers(sort=False, verbose=False) if comm is not None: particles.mpi_sort_markers() @@ -89,7 +94,7 @@ def test_sph_evaluation_1d( h1 = 1 / boxes_per_dim[0] h2 = 1 / boxes_per_dim[1] h3 = 1 / boxes_per_dim[2] - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") test_eval = particles.eval_density( ee1, ee2, @@ -104,16 +109,16 @@ def test_sph_evaluation_1d( if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) exact_eval = fun_exact(ee1, ee2, ee3) - err_max_norm = xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + err_max_norm = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) if rank == 0: - print(f"\n{boxes_per_dim =}") - print(f"{kernel =}, {derivative =}") - print(f"{bc_x =}, {eval_pts =}, {tesselation =}, {err_max_norm =}") + 
print(f"\n{boxes_per_dim = }") + print(f"{kernel = }, {derivative =}") + print(f"{bc_x = }, {eval_pts = }, {tesselation = }, {err_max_norm = }") if show_plot: plt.figure(figsize=(12, 8)) plt.plot(ee1.squeeze(), fun_exact(ee1, ee2, ee3).squeeze(), label="exact") @@ -164,45 +169,48 @@ def test_sph_evaluation_2d( domain_class = getattr(domains, dom_type) domain = domain_class(**dom_params) + loading = "tesselation" + loading_params = {"n_quad": 1} if kernel == "trigonometric_2d" and derivative != 0: ppb = 100 else: ppb = 16 - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") - # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} - pert = {"n": perturbations.ModesCosCos(ls=(1,), ms=(1,), amps=(1e-0,))} + mode_params = {"given_in_basis": "0", "ls": [1], "ms": [1], "amps": [1.0]} + modes = {"ModesCosCos": mode_params} + pert_params = {"n": modes} if derivative == 0: - fun_exact = lambda e1, e2, e3: 1.5 + xp.cos(2 * xp.pi * e1) * xp.cos(2 * xp.pi * e2) + fun_exact = lambda e1, e2, e3: 1.5 + np.cos(2 * np.pi * e1) * np.cos(2 * np.pi * e2) elif derivative == 1: - fun_exact = lambda e1, e2, e3: -2 * xp.pi * xp.sin(2 * xp.pi * e1) * xp.cos(2 * xp.pi * e2) + fun_exact = lambda e1, e2, e3: -2 * np.pi * np.sin(2 * np.pi * e1) * np.cos(2 * np.pi * e2) else: - fun_exact = lambda e1, e2, e3: -2 * xp.pi * xp.cos(2 * xp.pi * e1) * xp.sin(2 * xp.pi * e2) + fun_exact = lambda e1, e2, e3: -2 * np.pi * np.cos(2 * np.pi * e1) * np.sin(2 * np.pi * e2) # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, bc_y, "periodic")) + bc_sph = [bc_x, bc_y, "periodic"] # eval points - eta1 = xp.linspace(0, 1.0, eval_pts) - eta2 = xp.linspace(0, 1.0, eval_pts) - eta3 = xp.array([0.0]) + eta1 = np.linspace(0, 1.0, eval_pts) + eta2 = np.linspace(0, 1.0, eval_pts) + eta3 = np.array([0.0]) # particles object particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=bc_sph, bufsize=1.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, verbose=False, ) @@ -213,7 +221,7 @@ def test_sph_evaluation_2d( h1 = 1 / boxes_per_dim[0] h2 = 1 / boxes_per_dim[1] h3 = 1 / boxes_per_dim[2] - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") test_eval = particles.eval_density( ee1, ee2, @@ -228,16 +236,16 @@ def test_sph_evaluation_2d( if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) exact_eval = fun_exact(ee1, ee2, ee3) - err_max_norm = xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + err_max_norm = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) if rank == 0: - print(f"\n{boxes_per_dim =}") - print(f"{kernel =}, {derivative =}") - print(f"{bc_x =}, {bc_y =}, {eval_pts =}, {tesselation =}, {err_max_norm =}") + print(f"\n{boxes_per_dim = }") + print(f"{kernel = }, {derivative =}") + print(f"{bc_x = }, {bc_y = }, {eval_pts = }, {tesselation = }, {err_max_norm = }") if show_plot: plt.figure(figsize=(12, 24)) plt.subplot(2, 1, 1) @@ -288,16 +296,16 @@ def 
test_sph_evaluation_3d( domain_class = getattr(domains, dom_type) domain = domain_class(**dom_params) + loading = "tesselation" + loading_params = {"n_quad": 1} if kernel in ("trigonometric_3d", "linear_isotropic_3d") and derivative != 0: ppb = 100 else: ppb = 64 - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") - - # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} if derivative == 0: fun_exact = lambda e1, e2, e3: 1.5 + 0.0 * e1 @@ -305,23 +313,25 @@ def test_sph_evaluation_3d( fun_exact = lambda e1, e2, e3: 0.0 * e1 # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, bc_y, bc_z)) + bc_sph = [bc_x, bc_y, bc_z] # eval points - eta1 = xp.linspace(0, 1.0, eval_pts) - eta2 = xp.linspace(0, 1.0, eval_pts) - eta3 = xp.linspace(0, 1.0, eval_pts) + eta1 = np.linspace(0, 1.0, eval_pts) + eta2 = np.linspace(0, 1.0, eval_pts) + eta3 = np.linspace(0, 1.0, eval_pts) # particles object particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=bc_sph, bufsize=2.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - n_as_volume_form=True, + bckgr_params=bckgr_params, + # pert_params=pert_params, verbose=False, ) @@ -332,7 +342,7 @@ def test_sph_evaluation_3d( h1 = 1 / boxes_per_dim[0] h2 = 1 / boxes_per_dim[1] h3 = 1 / boxes_per_dim[2] - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") test_eval = particles.eval_density( ee1, ee2, @@ -347,35 +357,35 @@ def test_sph_evaluation_3d( if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) exact_eval = fun_exact(ee1, ee2, ee3) - err_max_norm = xp.max(xp.abs(all_eval - exact_eval)) + err_max_norm = np.max(np.abs(all_eval - exact_eval)) if rank == 0: - print(f"\n{boxes_per_dim =}") - print(f"{kernel =}, {derivative =}") - print(f"{bc_x =}, {bc_y =}, {bc_z =}, {eval_pts =}, {tesselation =}, {err_max_norm =}") + print(f"\n{boxes_per_dim = }") + print(f"{kernel = }, {derivative = }") + print(f"{bc_x = }, {bc_y = }, {bc_z = }, {eval_pts = }, {tesselation = }, {err_max_norm = }") if show_plot: - print(f"\n{fun_exact(ee1, ee2, ee3)[5, 5, 5] =}") - print(f"{ee1[5, 5, 5] =}, {ee2[5, 5, 5] =}, {ee3[5, 5, 5] =}") - print(f"{all_eval[5, 5, 5] =}") + print(f"\n{fun_exact(ee1, ee2, ee3)[5, 5, 5] = }") + print(f"{ee1[5, 5, 5] = }, {ee2[5, 5, 5] = }, {ee3[5, 5, 5] = }") + print(f"{all_eval[5, 5, 5] = }") - print(f"\n{ee1[4, 4, 4] =}, {ee2[4, 4, 4] =}, {ee3[4, 4, 4] =}") - print(f"{all_eval[4, 4, 4] =}") + print(f"\n{ee1[4, 4, 4] = }, {ee2[4, 4, 4] = }, {ee3[4, 4, 4] = }") + print(f"{all_eval[4, 4, 4] = }") - print(f"\n{ee1[3, 3, 3] =}, {ee2[3, 3, 3] =}, {ee3[3, 3, 3] =}") - print(f"{all_eval[3, 3, 3] =}") + print(f"\n{ee1[3, 3, 3] = }, {ee2[3, 3, 3] = }, {ee3[3, 3, 3] = }") + print(f"{all_eval[3, 3, 3] = }") - print(f"\n{ee1[2, 2, 2] =}, {ee2[2, 2, 2] =}, {ee3[2, 2, 2] =}") - print(f"{all_eval[2, 2, 2] =}") + print(f"\n{ee1[2, 2, 2] = }, {ee2[2, 2, 2] = }, {ee3[2, 2, 2] = }") + print(f"{all_eval[2, 2, 2] = }") - print(f"\n{ee1[1, 1,
1] = }, {ee2[1, 1, 1] = }, {ee3[1, 1, 1] = }") + print(f"{all_eval[1, 1, 1] = }") - print(f"\n{ee1[0, 0, 0] =}, {ee2[0, 0, 0] =}, {ee3[0, 0, 0] =}") - print(f"{all_eval[0, 0, 0] =}") + print(f"\n{ee1[0, 0, 0] = }, {ee2[0, 0, 0] = }, {ee3[0, 0, 0] = }") + print(f"{all_eval[0, 0, 0] = }") # plt.figure(figsize=(12, 24)) # plt.subplot(2, 1, 1) # plt.pcolor(ee1[0, :, :], ee2[0, :, :], fun_exact(ee1, ee2, ee3)[0, :, :]) @@ -409,52 +419,53 @@ def test_evaluation_SPH_Np_convergence_1d(boxes_per_dim, bc_x, eval_pts, tessela domain = domain_class(**dom_params) if tesselation: + loading = "tesselation" + loading_params = {"n_quad": 1} + # ppbs = [5000, 10000, 15000, 20000, 25000] ppbs = [4, 8, 16, 32, 64] Nps = [None] * len(ppbs) else: + loading = "pseudo_random" + loading_params = {"seed": 1607} Nps = [(2**k) * 10**3 for k in range(-2, 9)] ppbs = [None] * len(Nps) # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} - # perturbation]} + # perturbation + mode_params = {"given_in_basis": "0", "ls": [1], "amps": [-1e-0]} if bc_x in ("periodic", "fixed"): - fun_exact = lambda e1, e2, e3: 1.5 - xp.sin(2 * xp.pi * e1) - pert = {"n": perturbations.ModesSin(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.sin(2 * np.pi * e1) + modes = {"ModesSin": mode_params} elif bc_x == "mirror": - fun_exact = lambda e1, e2, e3: 1.5 - xp.cos(2 * xp.pi * e1) - pert = {"n": perturbations.ModesCos(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.cos(2 * np.pi * e1) + modes = {"ModesCos": mode_params} + pert_params = {"n": modes} # exact solution - eta1 = xp.linspace(0, 1.0, eval_pts) # add offset for non-periodic boundary conditions, TODO: implement Neumann - eta2 = xp.array([0.0]) - eta3 = xp.array([0.0]) - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + eta1 = np.linspace(0, 1.0, eval_pts) # add offset for non-periodic boundary conditions, TODO: implement Neumann + eta2 = np.array([0.0]) + eta3 = np.array([0.0]) + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") exact_eval = fun_exact(ee1, ee2, ee3) - # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, "periodic", "periodic")) - # loop err_vec = [] for Np, ppb in zip(Nps, ppbs): - if tesselation: - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") - else: - loading_params = LoadingParameters(Np=Np, seed=1607) - particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + Np=Np, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=[bc_x, "periodic", "periodic"], bufsize=1.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, verbose=False, ) @@ -471,42 +482,42 @@ def test_evaluation_SPH_Np_convergence_1d(boxes_per_dim, bc_x, eval_pts, tessela if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) if show_plot and rank == 0: plt.figure() plt.plot(ee1.squeeze(), exact_eval.squeeze(), label="exact") plt.plot(ee1.squeeze(), all_eval.squeeze(), "--.", label="eval_sph") - plt.title(f"{Np =}, {ppb =}") + plt.title(f"{Np = }, {ppb = }") # plt.savefig(f"fun_{Np}_{ppb}.png") - diff = 
xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + diff = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) err_vec += [diff] - print(f"{Np =}, {ppb =}, {diff =}") + print(f"{Np = }, {ppb = }, {diff = }") if tesselation: - fit = xp.polyfit(xp.log(ppbs), xp.log(err_vec), 1) + fit = np.polyfit(np.log(ppbs), np.log(err_vec), 1) xvec = ppbs else: - fit = xp.polyfit(xp.log(Nps), xp.log(err_vec), 1) + fit = np.polyfit(np.log(Nps), np.log(err_vec), 1) xvec = Nps if show_plot and rank == 0: plt.figure(figsize=(12, 8)) plt.loglog(xvec, err_vec, label="Convergence") - plt.loglog(xvec, xp.exp(fit[1]) * xp.array(xvec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") + plt.loglog(xvec, np.exp(fit[1]) * np.array(xvec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") plt.legend() plt.show() # plt.savefig(f"Convergence_SPH_{tesselation=}") if rank == 0: - print(f"\n{bc_x =}, {eval_pts =}, {tesselation =}, {fit[0] =}") + print(f"\n{bc_x = }, {eval_pts = }, {tesselation = }, {fit[0] = }") if tesselation: assert fit[0] < 2e-3 else: - assert xp.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate + assert np.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate @pytest.mark.parametrize("boxes_per_dim", [(12, 1, 1)]) @@ -528,50 +539,52 @@ def test_evaluation_SPH_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, tesselat domain = domain_class(**dom_params) if tesselation: + loading = "tesselation" + loading_params = {"seed": 1607} Np = None ppb = 160 - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") else: + loading = "pseudo_random" + loading_params = {"seed": 1607} Np = 160000 ppb = None - loading_params = LoadingParameters(Np=Np, ppb=ppb, seed=1607) - # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} # perturbation + mode_params = {"given_in_basis": "0", "ls": [1], "amps": [-1e-0]} if bc_x in ("periodic", "fixed"): - fun_exact = lambda e1, e2, e3: 1.5 - xp.sin(2 * xp.pi * e1) - pert = {"n": perturbations.ModesSin(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.sin(2 * np.pi * e1) + modes = {"ModesSin": mode_params} elif bc_x == "mirror": - fun_exact = lambda e1, e2, e3: 1.5 - xp.cos(2 * xp.pi * e1) - pert = {"n": perturbations.ModesCos(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.cos(2 * np.pi * e1) + modes = {"ModesCos": mode_params} + pert_params = {"n": modes} # exact solution - eta1 = xp.linspace(0, 1.0, eval_pts) # add offset for non-periodic boundary conditions, TODO: implement Neumann - eta2 = xp.array([0.0]) - eta3 = xp.array([0.0]) - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + eta1 = np.linspace(0, 1.0, eval_pts) # add offset for non-periodic boundary conditions, TODO: implement Neumann + eta2 = np.array([0.0]) + eta3 = np.array([0.0]) + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") exact_eval = fun_exact(ee1, ee2, ee3) - # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, "periodic", "periodic")) - - # loop + # parameters h_vec = [((2**k) * 10**-3 * 0.25) for k in range(2, 12)] err_vec = [] for h1 in h_vec: particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + Np=Np, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=[bc_x, "periodic", "periodic"], bufsize=1.0, + loading=loading, + loading_params=loading_params, domain=domain, 
- background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, verbose=False, ) @@ -587,20 +600,20 @@ def test_evaluation_SPH_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, tesselat if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) if show_plot and rank == 0: plt.figure() plt.plot(ee1.squeeze(), exact_eval.squeeze(), label="exact") plt.plot(ee1.squeeze(), all_eval.squeeze(), "--.", label="eval_sph") - plt.title(f"{h1 =}") + plt.title(f"{h1 = }") # plt.savefig(f"fun_{h1}.png") # error in max-norm - diff = xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + diff = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) - print(f"{h1 =}, {diff =}") + print(f"{h1 = }, {diff = }") if tesselation and h1 < 0.256: assert diff < 0.036 @@ -608,23 +621,23 @@ def test_evaluation_SPH_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, tesselat err_vec += [diff] if tesselation: - fit = xp.polyfit(xp.log(h_vec[1:5]), xp.log(err_vec[1:5]), 1) + fit = np.polyfit(np.log(h_vec[1:5]), np.log(err_vec[1:5]), 1) else: - fit = xp.polyfit(xp.log(h_vec[:-2]), xp.log(err_vec[:-2]), 1) + fit = np.polyfit(np.log(h_vec[:-2]), np.log(err_vec[:-2]), 1) if show_plot and rank == 0: plt.figure(figsize=(12, 8)) plt.loglog(h_vec, err_vec, label="Convergence") - plt.loglog(h_vec, xp.exp(fit[1]) * xp.array(h_vec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") + plt.loglog(h_vec, np.exp(fit[1]) * np.array(h_vec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") plt.legend() plt.show() # plt.savefig("Convergence_SPH") if rank == 0: - print(f"\n{bc_x =}, {eval_pts =}, {tesselation =}, {fit[0] =}") + print(f"\n{bc_x = }, {eval_pts = }, {tesselation = }, {fit[0] = }") if not tesselation: - assert xp.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate + assert np.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate @pytest.mark.parametrize("boxes_per_dim", [(12, 1, 1)]) @@ -646,54 +659,55 @@ def test_evaluation_mc_Np_and_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, te domain = domain_class(**dom_params) if tesselation: + loading = "tesselation" + loading_params = {"n_quad": 1} + # ppbs = [5000, 10000, 15000, 20000, 25000] ppbs = [4, 8, 16, 32, 64] Nps = [None] * len(ppbs) + else: + loading = "pseudo_random" + loading_params = {"seed": 1607} Nps = [(2**k) * 10**3 for k in range(-2, 9)] ppbs = [None] * len(Nps) - # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} # perturbation + mode_params = {"given_in_basis": "0", "ls": [1], "amps": [-1e-0]} if bc_x in ("periodic", "fixed"): - fun_exact = lambda e1, e2, e3: 1.5 - xp.sin(2 * xp.pi * e1) - pert = {"n": perturbations.ModesSin(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.sin(2 * np.pi * e1) + modes = {"ModesSin": mode_params} elif bc_x == "mirror": - fun_exact = lambda e1, e2, e3: 1.5 - xp.cos(2 * xp.pi * e1) - pert = {"n": perturbations.ModesCos(ls=(1,), amps=(-1e-0,))} + fun_exact = lambda e1, e2, e3: 1.5 - np.cos(2 * np.pi * e1) + modes = {"ModesCos": mode_params} + pert_params = {"n": modes} # exact solution - eta1 = xp.linspace(0, 1.0, eval_pts) - eta2 = xp.array([0.0]) - eta3 = xp.array([0.0]) - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + eta1 = np.linspace(0, 
1.0, eval_pts) + eta2 = np.array([0.0]) + eta3 = np.array([0.0]) + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") exact_eval = fun_exact(ee1, ee2, ee3) - # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, "periodic", "periodic")) - h_arr = [((2**k) * 10**-3 * 0.25) for k in range(2, 12)] err_vec = [] for h in h_arr: err_vec += [[]] for Np, ppb in zip(Nps, ppbs): - if tesselation: - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") - else: - loading_params = LoadingParameters(Np=Np, seed=1607) - particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + Np=Np, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=[bc_x, "periodic", "periodic"], bufsize=1.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, verbose=False, ) @@ -710,15 +724,15 @@ def test_evaluation_mc_Np_and_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, te if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) comm.Allreduce(test_eval, all_eval, op=MPI.SUM) # error in max-norm - diff = xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + diff = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) err_vec[-1] += [diff] if rank == 0: - print(f"{Np =}, {ppb =}, {diff =}") + print(f"{Np = }, {ppb = }, {diff = }") # if show_plot: # plt.figure() # plt.plot(ee1.squeeze(), fun_exact(ee1, ee2, ee3).squeeze(), label="exact") @@ -726,29 +740,29 @@ def test_evaluation_mc_Np_and_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, te # plt.title(f"{h = }, {Np = }") # # plt.savefig(f"fun_h{h}_N{Np}_ppb{ppb}.png") - err_vec = xp.array(err_vec) - err_min = xp.min(err_vec) + err_vec = np.array(err_vec) + err_min = np.min(err_vec) if show_plot and rank == 0: if tesselation: - h_mesh, n_mesh = xp.meshgrid(xp.log10(h_arr), xp.log10(ppbs), indexing="ij") + h_mesh, n_mesh = np.meshgrid(np.log10(h_arr), np.log10(ppbs), indexing="ij") if not tesselation: - h_mesh, n_mesh = xp.meshgrid(xp.log10(h_arr), xp.log10(Nps), indexing="ij") + h_mesh, n_mesh = np.meshgrid(np.log10(h_arr), np.log10(Nps), indexing="ij") plt.figure(figsize=(6, 6)) - plt.pcolor(h_mesh, n_mesh, xp.log10(err_vec), shading="auto") + plt.pcolor(h_mesh, n_mesh, np.log10(err_vec), shading="auto") plt.title("Error") plt.colorbar(label="log10(error)") plt.xlabel("log10(h)") plt.ylabel("log10(particles)") - min_indices = xp.argmin(err_vec, axis=0) + min_indices = np.argmin(err_vec, axis=0) min_h_values = [] for mi in min_indices: - min_h_values += [xp.log10(h_arr[mi])] + min_h_values += [np.log10(h_arr[mi])] if tesselation: - log_particles = xp.log10(ppbs) + log_particles = np.log10(ppbs) else: - log_particles = xp.log10(Nps) + log_particles = np.log10(Nps) plt.plot(min_h_values, log_particles, "r-", label="Min error h for each Np", linewidth=2) plt.legend() # plt.savefig("SPH_conv_in_h_and_N.png") @@ -756,11 +770,11 @@ def test_evaluation_mc_Np_and_h_convergence_1d(boxes_per_dim, bc_x, eval_pts, te plt.show() if rank == 0: - print(f"\n{tesselation =}, {bc_x =}, {err_min =}") + print(f"\n{tesselation = }, {bc_x = }, {err_min = }") if tesselation: if bc_x == "periodic": - assert xp.min(err_vec) < 7.7e-5 + assert np.min(err_vec) < 7.7e-5 elif bc_x == "fixed": assert err_min < 7.7e-5 else: @@ -794,63 +808,65 @@ def 
test_evaluation_SPH_Np_convergence_2d(boxes_per_dim, bc_x, bc_y, tesselation domain = domain_class(**dom_params) if tesselation: + loading = "tesselation" + loading_params = {"n_quad": 1} ppbs = [4, 8, 16, 32, 64, 200] Nps = [None] * len(ppbs) else: + loading = "pseudo_random" + loading_params = {"seed": 1607} Nps = [(2**k) * 10**3 for k in range(-2, 9)] ppbs = [None] * len(Nps) - # background - background = ConstantVelocity(n=1.5, density_profile="constant") - background.domain = domain + cst_vel = {"density_profile": "constant", "n": 1.5} + bckgr_params = {"ConstantVelocity": cst_vel, "pforms": ["vol", None]} # perturbation + mode_params = {"given_in_basis": "0", "ls": [1], "ms": [1], "amps": [-1e-0]} + if bc_x in ("periodic", "fixed"): if bc_y in ("periodic", "fixed"): - fun_exact = lambda x, y, z: 1.5 - xp.sin(2 * xp.pi / Lx * x) * xp.sin(2 * xp.pi / Ly * y) - pert = {"n": perturbations.ModesSinSin(ls=(1,), ms=(1,), amps=(-1e-0,))} + fun_exact = lambda x, y, z: 1.5 - np.sin(2 * np.pi / Lx * x) * np.sin(2 * np.pi / Ly * y) + modes = {"ModesSinSin": mode_params} elif bc_y == "mirror": - fun_exact = lambda x, y, z: 1.5 - xp.sin(2 * xp.pi / Lx * x) * xp.cos(2 * xp.pi / Ly * y) - pert = {"n": perturbations.ModesSinCos(ls=(1,), ms=(1,), amps=(-1e-0,))} + fun_exact = lambda x, y, z: 1.5 - np.sin(2 * np.pi / Lx * x) * np.cos(2 * np.pi / Ly * y) + modes = {"ModesSinCos": mode_params} elif bc_x == "mirror": if bc_y in ("periodic", "fixed"): - fun_exact = lambda x, y, z: 1.5 - xp.cos(2 * xp.pi / Lx * x) * xp.sin(2 * xp.pi / Ly * y) - pert = {"n": perturbations.ModesCosSin(ls=(1,), ms=(1,), amps=(-1e-0,))} + fun_exact = lambda x, y, z: 1.5 - np.cos(2 * np.pi / Lx * x) * np.sin(2 * np.pi / Ly * y) + modes = {"ModesCosSin": mode_params} elif bc_y == "mirror": - fun_exact = lambda x, y, z: 1.5 - xp.cos(2 * xp.pi / Lx * x) * xp.cos(2 * xp.pi / Ly * y) - pert = {"n": perturbations.ModesCosCos(ls=(1,), ms=(1,), amps=(-1e-0,))} + fun_exact = lambda x, y, z: 1.5 - np.cos(2 * np.pi / Lx * x) * np.cos(2 * np.pi / Ly * y) + modes = {"ModesCosCos": mode_params} + + pert_params = {"n": modes} # exact solution - eta1 = xp.linspace(0, 1.0, 41) - eta2 = xp.linspace(0, 1.0, 86) - eta3 = xp.array([0.0]) - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + eta1 = np.linspace(0, 1.0, 41) + eta2 = np.linspace(0, 1.0, 86) + eta3 = np.array([0.0]) + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") x, y, z = domain(eta1, eta2, eta3) exact_eval = fun_exact(x, y, z) - # boundary conditions - boundary_params = BoundaryParameters(bc_sph=(bc_x, bc_y, "periodic")) - err_vec = [] for Np, ppb in zip(Nps, ppbs): - if tesselation: - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") - else: - loading_params = LoadingParameters(Np=Np, seed=1607) - particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, - boundary_params=boundary_params, + Np=Np, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc_sph=[bc_x, bc_y, "periodic"], bufsize=1.0, box_bufsize=4.0, + loading=loading, + loading_params=loading_params, domain=domain, - background=background, - perturbations=pert, - n_as_volume_form=True, + bckgr_params=bckgr_params, + pert_params=pert_params, verbose=False, + mpi_dims_mask=[True, False, False], ) if rank == 0: print(f"{particles.domain_array}") @@ -868,61 +884,66 @@ def test_evaluation_SPH_Np_convergence_2d(boxes_per_dim, bc_x, bc_y, tesselation if comm is None: all_eval = test_eval else: - all_eval = xp.zeros_like(test_eval) + all_eval = np.zeros_like(test_eval) 
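+            # each MPI rank evaluates the density only from its local markers; the Allreduce below sums these partial evaluations into the global result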
comm.Allreduce(test_eval, all_eval, op=MPI.SUM) + # if rank == 0: + # print(f"{all_eval.squeeze().shape}") + # print(f"{all_eval.squeeze()[0]}") + # print(f"{all_eval.squeeze().T[0]}") + # error in max-norm - diff = xp.max(xp.abs(all_eval - exact_eval)) / xp.max(xp.abs(exact_eval)) + diff = np.max(np.abs(all_eval - exact_eval)) / np.max(np.abs(exact_eval)) err_vec += [diff] if tesselation: assert diff < 0.06 if rank == 0: - print(f"{Np =}, {ppb =}, {diff =}") + print(f"{Np = }, {ppb = }, {diff = }") if show_plot: fig, ax = plt.subplots() d = ax.pcolor(ee1.squeeze(), ee2.squeeze(), all_eval.squeeze(), label="eval_sph", vmin=1.0, vmax=2.0) fig.colorbar(d, ax=ax, label="2d_SPH") ax.set_xlabel("ee1") ax.set_ylabel("ee2") - ax.set_title(f"{Np}_{ppb =}") + ax.set_title(f"{Np}_{ppb = }") # fig.savefig(f"2d_sph_{Np}_{ppb}.png") if tesselation: - fit = xp.polyfit(xp.log(ppbs), xp.log(err_vec), 1) + fit = np.polyfit(np.log(ppbs), np.log(err_vec), 1) xvec = ppbs else: - fit = xp.polyfit(xp.log(Nps), xp.log(err_vec), 1) + fit = np.polyfit(np.log(Nps), np.log(err_vec), 1) xvec = Nps if show_plot and rank == 0: plt.figure(figsize=(12, 8)) plt.loglog(xvec, err_vec, label="Convergence") - plt.loglog(xvec, xp.exp(fit[1]) * xp.array(xvec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") + plt.loglog(xvec, np.exp(fit[1]) * np.array(xvec) ** (fit[0]), "--", label=f"fit with slope {fit[0]}") plt.legend() plt.show() # plt.savefig(f"Convergence_SPH_{tesselation=}") if rank == 0: - print(f"\n{bc_x =}, {tesselation =}, {fit[0] =}") + print(f"\n{bc_x = }, {tesselation = }, {fit[0] = }") if not tesselation: - assert xp.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate + assert np.abs(fit[0] + 0.5) < 0.1 # Monte Carlo rate if __name__ == "__main__": - test_sph_evaluation_1d( - (24, 1, 1), - "trigonometric_1d", - # "gaussian_1d", - 1, - # "periodic", - "mirror", - 16, - tesselation=False, - show_plot=True, - ) + # test_sph_evaluation_1d( + # (24, 1, 1), + # "trigonometric_1d", + # # "gaussian_1d", + # 1, + # "periodic", + # # "mirror", + # 10, + # tesselation=True, + # show_plot=True + # ) # test_sph_evaluation_2d( # (12, 12, 1), @@ -953,7 +974,7 @@ def test_evaluation_SPH_Np_convergence_2d(boxes_per_dim, bc_x, bc_y, tesselation # test_evaluation_SPH_h_convergence_1d((12,1,1), "periodic", eval_pts=16, tesselation=True, show_plot=True) # test_evaluation_mc_Np_and_h_convergence_1d((12,1,1),"mirror", eval_pts=16, tesselation = False, show_plot=True) # test_evaluation_SPH_Np_convergence_2d((24, 24, 1), "periodic", "periodic", tesselation=True, show_plot=True) - # test_evaluation_SPH_Np_convergence_2d((24, 24, 1), "periodic", "fixed", tesselation=True, show_plot=True) + test_evaluation_SPH_Np_convergence_2d((24, 24, 1), "periodic", "fixed", tesselation=True, show_plot=True) # test_evaluation_SPH_Np_convergence_2d((32, 32, 1), "fixed", "periodic", tesselation=True, show_plot=True) # test_evaluation_SPH_Np_convergence_2d((32, 32, 1), "fixed", "fixed", tesselation=True, show_plot=True) # test_evaluation_SPH_Np_convergence_2d((32, 32, 1), "mirror", "mirror", tesselation=True, show_plot=True) diff --git a/src/struphy/pic/tests/test_tesselation.py b/src/struphy/pic/tests/test_tesselation.py index b138af50a..7215bf76b 100644 --- a/src/struphy/pic/tests/test_tesselation.py +++ b/src/struphy/pic/tests/test_tesselation.py @@ -1,16 +1,13 @@ from time import time -import cunumpy as xp import pytest from matplotlib import pyplot as plt from psydac.ddm.mpi import mpi as MPI from struphy.feec.psydac_derham import Derham -from 
struphy.fields_background.equils import ConstantVelocity from struphy.geometry import domains -from struphy.initial import perturbations from struphy.pic.particles import ParticlesSPH -from struphy.pic.utilities import BoundaryParameters, LoadingParameters, WeightsParameters +from struphy.utils.arrays import xp as np @pytest.mark.parametrize("ppb", [8, 12]) @@ -27,14 +24,17 @@ def test_draw(ppb, nx, ny, nz): domain = domain_class(**dom_params) boxes_per_dim = (nx, ny, nz) + bc = ["periodic"] * 3 + loading = "tesselation" bufsize = 0.5 - loading_params = LoadingParameters(ppb=ppb, loading="tesselation") # instantiate Particle object particles = ParticlesSPH( comm_world=comm, - loading_params=loading_params, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc=bc, + loading=loading, domain=domain, verbose=False, bufsize=bufsize, @@ -56,20 +56,20 @@ def test_draw(ppb, nx, ny, nz): zl = particles.domain_array[rank, 6] zr = particles.domain_array[rank, 7] - eta1 = xp.linspace(xl, xr, tiles_x + 1)[:-1] + (xr - xl) / (2 * tiles_x) - eta2 = xp.linspace(yl, yr, tiles_y + 1)[:-1] + (yr - yl) / (2 * tiles_y) - eta3 = xp.linspace(zl, zr, tiles_z + 1)[:-1] + (zr - zl) / (2 * tiles_z) + eta1 = np.linspace(xl, xr, tiles_x + 1)[:-1] + (xr - xl) / (2 * tiles_x) + eta2 = np.linspace(yl, yr, tiles_y + 1)[:-1] + (yr - yl) / (2 * tiles_y) + eta3 = np.linspace(zl, zr, tiles_z + 1)[:-1] + (zr - zl) / (2 * tiles_z) - ee1, ee2, ee3 = xp.meshgrid(eta1, eta2, eta3, indexing="ij") + ee1, ee2, ee3 = np.meshgrid(eta1, eta2, eta3, indexing="ij") e1 = ee1.flatten() e2 = ee2.flatten() e3 = ee3.flatten() # print(f'\n{rank = }, {e1 = }') - assert xp.allclose(particles.positions[:, 0], e1) - assert xp.allclose(particles.positions[:, 1], e2) - assert xp.allclose(particles.positions[:, 2], e3) + assert np.allclose(particles.positions[:, 0], e1) + assert np.allclose(particles.positions[:, 1], e2) + assert np.allclose(particles.positions[:, 2], e3) @pytest.mark.parametrize("ppb", [8, 12]) @@ -87,24 +87,31 @@ def test_cell_average(ppb, nx, ny, nz, n_quad, show_plot=False): domain = domain_class(**dom_params) boxes_per_dim = (nx, ny, nz) - loading_params = LoadingParameters(ppb=ppb, loading="tesselation", n_quad=n_quad) + bc = ["periodic"] * 3 + loading = "tesselation" + loading_params = {"n_quad": n_quad} bufsize = 0.5 - background = ConstantVelocity(n=1.0, ux=0.0, uy=0.0, uz=0.0, density_profile="constant") - background.domain = domain + cst_vel = {"ux": 0.0, "uy": 0.0, "uz": 0.0, "density_profile": "constant"} + bckgr_params = {"ConstantVelocity": cst_vel} - pert = {"n": perturbations.ModesSin(ls=(1,), amps=(1e-0,))} + mode_params = {"given_in_basis": "0", "ls": [1], "amps": [1e-0]} + modes = {"ModesSin": mode_params} + pert_params = {"n": modes} # instantiate Particle object particles = ParticlesSPH( comm_world=comm, + ppb=ppb, boxes_per_dim=boxes_per_dim, + bc=bc, + loading=loading, loading_params=loading_params, domain=domain, verbose=False, bufsize=bufsize, - background=background, - perturbations=pert, + bckgr_params=bckgr_params, + pert_params=pert_params, ) particles.draw_markers(sort=False) @@ -119,20 +126,20 @@ def test_cell_average(ppb, nx, ny, nz, n_quad, show_plot=False): yl = particles.domain_array[rank, 3] yr = particles.domain_array[rank, 4] - eta1 = xp.linspace(xl, xr, tiles_x + 1) - eta2 = xp.linspace(yl, yr, tiles_y + 1) + eta1 = np.linspace(xl, xr, tiles_x + 1) + eta2 = np.linspace(yl, yr, tiles_y + 1) if ny == nz == 1: plt.figure(figsize=(15, 10)) - plt.plot(particles.positions[:, 0], 
xp.zeros_like(particles.weights), "o", label="markers") + plt.plot(particles.positions[:, 0], np.zeros_like(particles.weights), "o", label="markers") plt.plot(particles.positions[:, 0], particles.weights, "-o", label="weights") plt.plot( - xp.linspace(xl, xr, 100), - particles.f_init(xp.linspace(xl, xr, 100), 0.5, 0.5).squeeze(), + np.linspace(xl, xr, 100), + particles.f_init(np.linspace(xl, xr, 100), 0.5, 0.5).squeeze(), "--", label="f_init", ) - plt.vlines(xp.linspace(xl, xr, nx + 1), 0, 2, label="sorting boxes", color="k") + plt.vlines(np.linspace(xl, xr, nx + 1), 0, 2, label="sorting boxes", color="k") ax = plt.gca() ax.set_xticks(eta1) ax.set_yticks(eta2) @@ -146,8 +153,8 @@ def test_cell_average(ppb, nx, ny, nz, n_quad, show_plot=False): plt.subplot(1, 2, 1) ax = plt.gca() - ax.set_xticks(xp.linspace(0, 1, nx + 1)) - ax.set_yticks(xp.linspace(0, 1, ny + 1)) + ax.set_xticks(np.linspace(0, 1, nx + 1)) + ax.set_yticks(np.linspace(0, 1, ny + 1)) coloring = particles.weights plt.scatter(particles.positions[:, 0], particles.positions[:, 1], c=coloring, s=40) plt.grid(c="k") @@ -159,12 +166,12 @@ def test_cell_average(ppb, nx, ny, nz, n_quad, show_plot=False): plt.subplot(1, 2, 2) ax = plt.gca() - ax.set_xticks(xp.linspace(0, 1, nx + 1)) - ax.set_yticks(xp.linspace(0, 1, ny + 1)) + ax.set_xticks(np.linspace(0, 1, nx + 1)) + ax.set_yticks(np.linspace(0, 1, ny + 1)) coloring = particles.weights - pos1 = xp.linspace(xl, xr, 100) - pos2 = xp.linspace(yl, yr, 100) - pp1, pp2 = xp.meshgrid(pos1, pos2, indexing="ij") + pos1 = np.linspace(xl, xr, 100) + pos2 = np.linspace(yl, yr, 100) + pp1, pp2 = np.meshgrid(pos1, pos2, indexing="ij") plt.pcolor(pp1, pp2, particles.f_init(pp1, pp2, 0.5).squeeze()) plt.grid(c="k") plt.axis("square") @@ -176,10 +183,10 @@ def test_cell_average(ppb, nx, ny, nz, n_quad, show_plot=False): plt.show() # test - print(f"\n{rank =}, {xp.max(xp.abs(particles.weights - particles.f_init(particles.positions))) =}") - assert xp.max(xp.abs(particles.weights - particles.f_init(particles.positions))) < 0.012 + print(f"\n{rank = }, {np.max(np.abs(particles.weights - particles.f_init(particles.positions))) = }") + assert np.max(np.abs(particles.weights - particles.f_init(particles.positions))) < 0.012 if __name__ == "__main__": - test_draw(8, 16, 1, 1) + # test_draw(8, 16, 1, 1) test_cell_average(8, 6, 16, 14, n_quad=2, show_plot=True) diff --git a/src/struphy/pic/utilities.py b/src/struphy/pic/utilities.py index 3ae645557..5507526a5 100644 --- a/src/struphy/pic/utilities.py +++ b/src/struphy/pic/utilities.py @@ -1,239 +1,5 @@ -import cunumpy as xp - import struphy.pic.utilities_kernels as utils -from struphy.io.options import ( - OptsLoading, - OptsMarkerBC, - OptsRecontructBC, - OptsSpatialLoading, -) - - -class LoadingParameters: - """Parameters for particle loading. - - Parameters - ---------- - Np : int - Total number of particles to load. - - ppc : int - Particles to load per cell if a grid is defined. Cells are defined from ``domain_array``. - - ppb : int - Particles to load per sorting box. Sorting boxes are defined from ``boxes_per_dim``. - - loading : OptsLoading - How to load markers: multiple options for Monte-Carlo, or "tesselation" for positioning them on a regular grid. - - seed : int - Seed for random generator. If None, no seed is taken. - - moments : tuple - Mean velocities and temperatures for the Gaussian sampling distribution. - If None, these are auto-calculated form the given background. 
- - spatial : OptsSpatialLoading - Draw uniformly in eta, or draw uniformly on the "disc" image of (eta1, eta2). - - specific_markers : tuple[tuple] - Each entry is a tuple of phase space coordinates (floats) of a specific marker to be initialized. - - n_quad : int - Number of quadrature points for tesselation. - - dir_external : str - Load markers from external .hdf5 file (absolute path). - - dir_particles_abs : str - Load markers from restart .hdf5 file (absolute path). - - dir_particles : str - Load markers from restart .hdf5 file (relative path to output folder). - - restart_key : str - Key in .hdf5 file's restart/ folder where marker array is stored. - """ - - def __init__( - self, - Np: int = None, - ppc: int = None, - ppb: int = 10, - loading: OptsLoading = "pseudo_random", - seed: int = None, - moments: tuple = None, - spatial: OptsSpatialLoading = "uniform", - specific_markers: tuple[tuple] = None, - n_quad: int = 1, - dir_exrernal: str = None, - dir_particles: str = None, - dir_particles_abs: str = None, - restart_key: str = None, - ): - self.Np = Np - self.ppc = ppc - self.ppb = ppb - self.loading = loading - self.seed = seed - self.moments = moments - self.spatial = spatial - self.specific_markers = specific_markers - self.n_quad = n_quad - self.dir_external = dir_exrernal - self.dir_particles = dir_particles - self.dir_particles_abs = dir_particles_abs - self.restart_key = restart_key - - -class WeightsParameters: - """Paramters for particle weights. - - Parameters - ---------- - control_variate : bool - Whether to use a control variate for noise reduction. - - reject_weights : bool - Whether to reject weights below threshold. - - threshold : float - Threshold for rejecting weights. - """ - - def __init__( - self, - control_variate: bool = False, - reject_weights: bool = False, - threshold: float = 0.0, - ): - self.control_variate = control_variate - self.reject_weights = reject_weights - self.threshold = threshold - - -class BoundaryParameters: - """Parameters for particle boundary and sph reconstruction boundary conditions. - - Parameters - ---------- - bc : tuple[OptsMarkerBC] - Boundary conditions for particle movement. - Either 'remove', 'reflect', 'periodic' or 'refill' in each direction. - - bc_refill : list - Either 'inner' or 'outer'. - - bc_sph : tuple[OptsRecontructBC] - Boundary conditions for sph kernel reconstruction. - """ - - def __init__( - self, - bc: tuple[OptsMarkerBC] = ("periodic", "periodic", "periodic"), - bc_refill=None, - bc_sph: tuple[OptsRecontructBC] = ("periodic", "periodic", "periodic"), - ): - self.bc = bc - self.bc_refill = bc_refill - self.bc_sph = bc_sph - - -class BinningPlot: - """Binning plot of marker distribution in phase space. - - Parameters - ---------- - slice : str - Coordinate-slice in phase space to bin. A combination of "e1", "e2", "e3", "v1", etc., separated by an underscore "_". - For example, "e1" showas a 1D binning plot over eta1, whereas "e1_v1" shows a 2D binning plot over eta1 and v1. - - n_bins : int | tuple[int] - Number of bins for each coordinate. - - ranges : tuple[int] | tuple[tuple[int]] = (0.0, 1.0) - Binning range (as an interval in R) for each coordinate. - - divide_by_jac : bool - Whether to divide by the Jacobian determinant (volume-to-0-form). 
- """ - - def __init__( - self, - slice: str = "e1", - n_bins: int | tuple[int] = 128, - ranges: tuple[float] | tuple[tuple[float]] = (0.0, 1.0), - divide_by_jac: bool = True, - ): - if isinstance(n_bins, int): - n_bins = (n_bins,) - - if not isinstance(ranges[0], tuple): - ranges = (ranges,) - - assert ((len(slice) - 2) / 3).is_integer(), f"Binning coordinates must be separated by '_', but reads {slice}." - assert len(slice.split("_")) == len(ranges) == len(n_bins), ( - f"Number of slices names ({len(slice.split('_'))}), number of bins ({len(n_bins)}), and number of ranges ({len(ranges)}) are inconsistent with each other!\n\n" - ) - self.slice = slice - self.n_bins = n_bins - self.ranges = ranges - self.divide_by_jac = divide_by_jac - - # computations and allocations - self._bin_edges = [] - for nb, rng in zip(n_bins, ranges): - self._bin_edges += [xp.linspace(rng[0], rng[1], nb + 1)] - self._bin_edges = tuple(self.bin_edges) - - self._f = xp.zeros(n_bins, dtype=float) - self._df = xp.zeros(n_bins, dtype=float) - - @property - def bin_edges(self) -> tuple: - return self._bin_edges - - @property - def f(self) -> xp.ndarray: - """The binned distribution function (full-f).""" - return self._f - - @property - def df(self) -> xp.ndarray: - """The binned distribution function minus the background (delta-f).""" - return self._df - - -class KernelDensityPlot: - """SPH density plot in configuration space. - - Parameters - ---------- - pts_e1, pts_e2, pts_e3 : int - Number of evaluation points in each direction. - """ - - def __init__( - self, - pts_e1: int = 16, - pts_e2: int = 16, - pts_e3: int = 1, - ): - e1 = xp.linspace(0.0, 1.0, pts_e1) - e2 = xp.linspace(0.0, 1.0, pts_e2) - e3 = xp.linspace(0.0, 1.0, pts_e3) - ee1, ee2, ee3 = xp.meshgrid(e1, e2, e3, indexing="ij") - self._plot_pts = (ee1, ee2, ee3) - self._n_sph = xp.zeros(ee1.shape, dtype=float) - - @property - def plot_pts(self) -> tuple: - return self._plot_pts - - @property - def n_sph(self) -> xp.ndarray: - """The evaluated density.""" - return self._n_sph +from struphy.utils.arrays import xp as np def get_kinetic_energy_particles(fe_coeffs, derham, domain, particles): @@ -252,15 +18,15 @@ def get_kinetic_energy_particles(fe_coeffs, derham, domain, particles): Particles object. """ - res = xp.empty(1, dtype=float) + res = np.empty(1, dtype=float) utils.canonical_kinetic_particles( res, particles.markers, - xp.array(derham.p), + np.array(derham.p), derham.Vh_fem["0"].knots[0], derham.Vh_fem["0"].knots[1], derham.Vh_fem["0"].knots[2], - xp.array( + np.array( derham.V0.coeff_space.starts, ), *domain.args_map, @@ -285,7 +51,7 @@ def get_electron_thermal_energy(density_0_form, derham, domain, nel1, nel2, nel3 Discrete Derham complex. 
""" - res = xp.empty(1, dtype=float) + res = np.empty(1, dtype=float) utils.thermal_energy( res, density_0_form._operators[0].matrix._data, diff --git a/src/struphy/pic/utilities_kernels.py b/src/struphy/pic/utilities_kernels.py index cb25cc05f..d0f3c4e92 100644 --- a/src/struphy/pic/utilities_kernels.py +++ b/src/struphy/pic/utilities_kernels.py @@ -1,4 +1,4 @@ -from numpy import abs, empty, log, mod, pi, shape, sign, sqrt, zeros +from numpy import abs, empty, log, pi, shape, sign, sqrt, zeros from pyccel.decorators import stack_array import struphy.bsplines.bsplines_kernels as bsplines_kernels @@ -14,7 +14,7 @@ eval_vectorfield_spline_mpi, get_spans, ) -from struphy.kernel_arguments.pusher_args_kernels import DerhamArguments, DomainArguments, MarkerArguments +from struphy.kernel_arguments.pusher_args_kernels import DerhamArguments, DomainArguments def eval_magnetic_moment_5d( @@ -331,71 +331,6 @@ def eval_magnetic_energy( # -- removed omp: #$ omp end parallel -@stack_array("dfm", "eta") -def eval_magnetic_energy_PBb( - markers: "float[:,:]", - args_derham: "DerhamArguments", - args_domain: "DomainArguments", - first_diagnostics_idx: int, - abs_B0: "float[:,:,:]", - PBb: "float[:,:,:]", -): - r""" - Evaluate :math:`mu_p |B(\boldsymbol \eta_p)_\parallel|` for each marker. - The result is stored at markers[:, first_diagnostics_idx]. - """ - eta = empty(3, dtype=float) - - dfm = empty((3, 3), dtype=float) - - # get number of markers - n_markers = shape(markers)[0] - - for ip in range(n_markers): - # only do something if particle is a "true" particle (i.e. not a hole) - if markers[ip, 0] == -1.0: - continue - - eta[:] = mod(markers[ip, 0:3], 1.0) - - weight = markers[ip, 7] - dweight = markers[ip, 5] - - mu = markers[ip, first_diagnostics_idx + 1] - - # spline evaluation - span1, span2, span3 = get_spans(eta[0], eta[1], eta[2], args_derham) - - # evaluate Jacobian, result in dfm - evaluation_kernels.df( - eta[0], - eta[1], - eta[2], - args_domain, - dfm, - ) - - # abs_B0; 0form - abs_B = eval_0form_spline_mpi( - span1, - span2, - span3, - args_derham, - abs_B0, - ) - - # PBb; 0form - PB_b = eval_0form_spline_mpi( - span1, - span2, - span3, - args_derham, - PBb, - ) - - markers[ip, first_diagnostics_idx] = mu * (abs_B + PB_b) - - @stack_array("v", "dfm", "b2", "norm_b_cart", "temp", "v_perp", "Larmor_r") def eval_guiding_center_from_6d( markers: "float[:,:]", @@ -506,101 +441,189 @@ def eval_guiding_center_from_6d( markers[ip, first_diagnostics_idx + 2] = z - Larmor_r[2] -@stack_array("dfm", "df_t", "g", "g_inv", "gradB, grad_PB_b", "tmp", "eta_mid", "eta_diff") -def eval_gradB_ediff( - args_markers: "MarkerArguments", - args_domain: "DomainArguments", +@stack_array("grad_PB", "tmp") +def accum_gradI_const( + markers: "float[:,:]", + Np: "int", args_derham: "DerhamArguments", - gradB1: "float[:,:,:]", - gradB2: "float[:,:,:]", - gradB3: "float[:,:,:]", - grad_PB_b1: "float[:,:,:]", - grad_PB_b2: "float[:,:,:]", - grad_PB_b3: "float[:,:,:]", - idx: int, + grad_PB1: "float[:,:,:]", + grad_PB2: "float[:,:,:]", + grad_PB3: "float[:,:,:]", + scale: "float", ): r"""TODO""" - - # allocate metric coeffs - dfm = empty((3, 3), dtype=float) - df_t = empty((3, 3), dtype=float) - g = empty((3, 3), dtype=float) - g_inv = empty((3, 3), dtype=float) - # allocate for magnetic field evaluation - gradB = empty(3, dtype=float) - grad_PB_b = empty(3, dtype=float) + grad_PB = empty(3, dtype=float) tmp = empty(3, dtype=float) - eta_mid = empty(3, dtype=float) - eta_diff = empty(3, dtype=float) - # get marker 
arguments - markers = args_markers.markers - n_markers = args_markers.n_markers - mu_idx = args_markers.mu_idx - first_init_idx = args_markers.first_init_idx - first_free_idx = args_markers.first_free_idx + # allocate for filling + res = zeros(1, dtype=float) - for ip in range(n_markers): + # get number of markers + n_markers_loc = shape(markers)[0] + + for ip in range(n_markers_loc): # only do something if particle is a "true" particle (i.e. not a hole) if markers[ip, 0] == -1.0: continue - # marker positions, mid point - eta_mid[:] = (markers[ip, 0:3] + markers[ip, first_init_idx : first_init_idx + 3]) / 2.0 - eta_mid[:] = mod(eta_mid[:], 1.0) - - eta_diff = markers[ip, 0:3] - markers[ip, first_init_idx : first_init_idx + 3] + # marker positions + eta1 = markers[ip, 0] # mid + eta2 = markers[ip, 1] # mid + eta3 = markers[ip, 2] # mid # marker weight and velocity weight = markers[ip, 5] - mu = markers[ip, mu_idx] + mu = markers[ip, 9] # b-field evaluation - span1, span2, span3 = get_spans(eta_mid[0], eta_mid[1], eta_mid[2], args_derham) - # print(span1, span2, span3) - - # evaluate Jacobian, result in dfm - evaluation_kernels.df( - eta_mid[0], - eta_mid[1], - eta_mid[2], - args_domain, - dfm, - ) - - linalg_kernels.transpose(dfm, df_t) - linalg_kernels.matrix_matrix(df_t, dfm, g) - linalg_kernels.matrix_inv(g, g_inv) + span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) - # gradB; 1form + # grad_PB; 1form eval_1form_spline_mpi( span1, span2, span3, args_derham, - gradB1, - gradB2, - gradB3, - gradB, + grad_PB1, + grad_PB2, + grad_PB3, + grad_PB, ) - # grad_PB_b; 1form - eval_1form_spline_mpi( + tmp[:] = markers[ip, 15:18] + res += linalg_kernels.scalar_dot(tmp, grad_PB) * weight * mu * scale + + return res / Np + + +def accum_en_fB( + markers: "float[:,:]", + Np: "int", + args_derham: "DerhamArguments", + PB: "float[:,:,:]", +): + r"""Sum up mu * |B0| * weight over the local markers, where B0 is the 0-form spline with coefficients PB evaluated at the marker position; return the sum divided by Np.""" + + # allocate for filling + res = zeros(1, dtype=float) + + # get number of markers + n_markers_loc = shape(markers)[0] + + for ip in range(n_markers_loc): + # only do something if particle is a "true" particle (i.e. not a hole) + if markers[ip, 0] == -1.0: + continue + + # marker positions + eta1 = markers[ip, 0] + eta2 = markers[ip, 1] + eta3 = markers[ip, 2] + + # marker weight and velocity + mu = markers[ip, 9] + weight = markers[ip, 5] + + # b-field evaluation + span1, span2, span3 = get_spans(eta1, eta2, eta3, args_derham) + + B0 = eval_0form_spline_mpi( span1, span2, span3, args_derham, - grad_PB_b1, - grad_PB_b2, - grad_PB_b3, - grad_PB_b, + PB, ) - tmp = gradB + grad_PB_b + res += abs(B0) * mu * weight + + return res / Np + + +@stack_array("e", "e_diff") +def check_eta_diff(markers: "float[:,:]"): + r"""Store the difference markers[:, 0:3] - markers[:, 9:12], corrected for crossings of the periodic boundaries, at markers[:, 15:18].""" + # marker position e + e = empty(3, dtype=float) + e_diff = empty(3, dtype=float) + + # get number of markers + n_markers_loc = shape(markers)[0] + + for ip in range(n_markers_loc): + # only do something if particle is a "true" particle (i.e.
not a hole) + if markers[ip, 0] == -1.0: + continue + + e[:] = markers[ip, 0:3] + e_diff[:] = e[:] - markers[ip, 9:12] + + for axis in range(3): + if e_diff[axis] > 0.5: + e_diff[axis] -= 1.0 + elif e_diff[axis] < -0.5: + e_diff[axis] += 1.0 + + markers[ip, 15:18] = e_diff[:] + + +@stack_array("e", "e_diff") +def check_eta_diff2(markers: "float[:,:]"): + r"""Same as check_eta_diff, but with the reference positions taken from markers[:, 12:15].""" + # marker position e + e = empty(3, dtype=float) + e_diff = empty(3, dtype=float) + + # get number of markers + n_markers_loc = shape(markers)[0] + + for ip in range(n_markers_loc): + # only do something if particle is a "true" particle (i.e. not a hole) + if markers[ip, 0] == -1.0: + continue + + e[:] = markers[ip, 0:3] + e_diff[:] = e[:] - markers[ip, 12:15] + + for axis in range(3): + if e_diff[axis] > 0.5: + e_diff[axis] -= 1.0 + elif e_diff[axis] < -0.5: + e_diff[axis] += 1.0 + + markers[ip, 15:18] = e_diff[:] + + +@stack_array("e", "e_diff", "e_mid") +def check_eta_mid(markers: "float[:,:]"): + r"""Save the current positions at markers[:, 12:15] and overwrite markers[:, 0:3] with the midpoint of the current positions and those at markers[:, 9:12]; the midpoint is shifted by 0.5 in directions where the two positions lie on opposite sides of a periodic boundary.""" + # marker position e + e = empty(3, dtype=float) + e_diff = empty(3, dtype=float) + e_mid = empty(3, dtype=float) + + # get number of markers + n_markers_loc = shape(markers)[0] + + for ip in range(n_markers_loc): + # only do something if particle is a "true" particle (i.e. not a hole) + if markers[ip, 0] == -1.0: + continue + + e[:] = markers[ip, 0:3] + markers[ip, 12:15] = e[:] + + e_diff[:] = e[:] - markers[ip, 9:12] + e_mid[:] = (e[:] + markers[ip, 9:12]) / 2.0 + + for axis in range(3): + if e_diff[axis] > 0.5: + e_mid[axis] += 0.5 + elif e_diff[axis] < -0.5: + e_mid[axis] += 0.5 - markers[ip, idx] = linalg_kernels.scalar_dot(eta_diff, tmp) - markers[ip, idx] *= mu + markers[ip, 0:3] = e_mid[:] @stack_array("dfm", "dfinv", "dfinv_t", "v", "a_form", "dfta_form") diff --git a/src/struphy/polar/basic.py b/src/struphy/polar/basic.py index 99a95cc47..78b81d4ff 100644 --- a/src/struphy/polar/basic.py +++ b/src/struphy/polar/basic.py @@ -1,9 +1,10 @@ -import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.basic import Vector, VectorSpace from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilVector +from struphy.utils.arrays import xp as np + class PolarDerhamSpace(VectorSpace): """ @@ -19,7 +20,7 @@ class PolarDerhamSpace(VectorSpace): """ def __init__(self, derham, space_id): assert not derham.spl_kind[0], "Spline basis in eta1 must be clamped" assert derham.spl_kind[1], "Spline basis in eta2 must be periodic" assert (derham.Nel[1] / 3) % 1 == 0.0, "Number of elements in eta2 must be a multiple of 3" @@ -208,7 +209,7 @@ class PolarVector(Vector): Element of a PolarDerhamSpace. An instance of a PolarVector consists of two parts: - 1. a list of xp.arrays of the polar coeffs (not distributed) + 1. a list of np.arrays of the polar coeffs (not distributed) 2. a tensor product StencilVector/BlockVector of the parent space with inner rings set to zero (distributed).
Parameters @@ -223,7 +224,7 @@ def __init__(self, V): self._dtype = V.dtype # initialize polar coeffs - self._pol = [xp.zeros((m, n)) for m, n in zip(V.n_polar, V.n3)] + self._pol = [np.zeros((m, n)) for m, n in zip(V.n_polar, V.n3)] # full tensor product vector self._tp = V.parent_space.zeros() @@ -240,7 +241,7 @@ def dtype(self): @property def pol(self): - """Polar coefficients as xp.array.""" + """Polar coefficients as np.array.""" return self._pol @pol.setter @@ -326,7 +327,7 @@ def toarray(self, allreduce=False): if self.space.comm is not None and allreduce: self.space.comm.Allreduce(MPI.IN_PLACE, out, op=MPI.SUM) - out = xp.concatenate((self.pol[0].flatten(), out)) + out = np.concatenate((self.pol[0].flatten(), out)) else: out1 = self.tp[0].toarray()[self.space.n_rings[0] * self.space.n[1] * self.space.n3[0] :] @@ -339,7 +340,7 @@ def toarray(self, allreduce=False): self.space.comm.Allreduce(MPI.IN_PLACE, out2, op=MPI.SUM) self.space.comm.Allreduce(MPI.IN_PLACE, out3, op=MPI.SUM) - out = xp.concatenate( + out = np.concatenate( ( self.pol[0].flatten(), out1, @@ -347,7 +348,7 @@ def toarray(self, allreduce=False): out2, self.pol[2].flatten(), out3, - ), + ) ) return out @@ -365,7 +366,7 @@ def copy(self, out=None): self._tp.copy(out=w.tp) # copy polar part for n, pl in enumerate(self._pol): - xp.copyto(w._pol[n], pl, casting="no") + np.copyto(w._pol[n], pl, casting="no") return w def __neg__(self): diff --git a/src/struphy/polar/extraction_operators.py b/src/struphy/polar/extraction_operators.py index 1c2a461ce..9afb9d237 100644 --- a/src/struphy/polar/extraction_operators.py +++ b/src/struphy/polar/extraction_operators.py @@ -1,4 +1,4 @@ -import cunumpy as xp +from struphy.utils.arrays import xp as np # ============================= 2D polar splines (C1) =================================== @@ -47,8 +47,8 @@ def __init__(self, domain, derham): self._pole = (cx[0, 0], cy[0, 0]) - assert xp.all(cx[0] == self.pole[0]) - assert xp.all(cy[0] == self.pole[1]) + assert np.all(cx[0] == self.pole[0]) + assert np.all(cy[0] == self.pole[1]) self._n0 = cx.shape[0] self._n1 = cx.shape[1] @@ -70,14 +70,14 @@ def __init__(self, domain, derham): self._tau = max( [ ((self.cx[1] - self.pole[0]) * (-2)).max(), - ((self.cx[1] - self.pole[0]) - xp.sqrt(3) * (self.cy[1] - self.pole[1])).max(), - ((self.cx[1] - self.pole[0]) + xp.sqrt(3) * (self.cy[1] - self.pole[1])).max(), - ], + ((self.cx[1] - self.pole[0]) - np.sqrt(3) * (self.cy[1] - self.pole[1])).max(), + ((self.cx[1] - self.pole[0]) + np.sqrt(3) * (self.cy[1] - self.pole[1])).max(), + ] ) # barycentric coordinates - self._xi_0 = xp.zeros((3, self.n1), dtype=float) - self._xi_1 = xp.zeros((3, self.n1), dtype=float) + self._xi_0 = np.zeros((3, self.n1), dtype=float) + self._xi_1 = np.zeros((3, self.n1), dtype=float) self._xi_0[:, :] = 1 / 3 @@ -85,12 +85,12 @@ def __init__(self, domain, derham): self._xi_1[1, :] = ( 1 / 3 - 1 / (3 * self.tau) * (self.cx[1] - self.pole[0]) - + xp.sqrt(3) / (3 * self.tau) * (self.cy[1] - self.pole[1]) + + np.sqrt(3) / (3 * self.tau) * (self.cy[1] - self.pole[1]) ) self._xi_1[2, :] = ( 1 / 3 - 1 / (3 * self.tau) * (self.cx[1] - self.pole[0]) - - xp.sqrt(3) / (3 * self.tau) * (self.cy[1] - self.pole[1]) + - np.sqrt(3) / (3 * self.tau) * (self.cy[1] - self.pole[1]) ) # remove small values @@ -102,17 +102,17 @@ def __init__(self, domain, derham): # ============= basis extraction operator for discrete 0-forms ================ # first n_rings tp rings --> "polar coeffs" - e0_blocks_ten_to_pol = xp.block([self.xi_0, self.xi_1]) 
+ e0_blocks_ten_to_pol = np.block([self.xi_0, self.xi_1]) self._e_ten_to_pol["0"] = [[csr(e0_blocks_ten_to_pol)]] # ============ basis extraction operator for discrete 1-forms (Hcurl) ========= # first n_rings tp rings --> "polar coeffs" - e1_11_blocks_ten_to_pol = xp.zeros((self.n_polar[1][0], self.n_rings[1][0] * self.n1), dtype=float) - e1_12_blocks_ten_to_pol = xp.zeros((self.n_polar[1][0], self.n_rings[1][1] * self.d1), dtype=float) + e1_11_blocks_ten_to_pol = np.zeros((self.n_polar[1][0], self.n_rings[1][0] * self.n1), dtype=float) + e1_12_blocks_ten_to_pol = np.zeros((self.n_polar[1][0], self.n_rings[1][1] * self.d1), dtype=float) - e1_21_blocks_ten_to_pol = xp.zeros((self.n_polar[1][1], self.n_rings[1][0] * self.n1), dtype=float) - e1_22_blocks_ten_to_pol = xp.zeros((self.n_polar[1][1], self.n_rings[1][1] * self.d1), dtype=float) + e1_21_blocks_ten_to_pol = np.zeros((self.n_polar[1][1], self.n_rings[1][0] * self.n1), dtype=float) + e1_22_blocks_ten_to_pol = np.zeros((self.n_polar[1][1], self.n_rings[1][1] * self.d1), dtype=float) # 1st component for l in range(2): @@ -135,7 +135,7 @@ def __init__(self, domain, derham): # =============== basis extraction operator for discrete 1-forms (Hdiv) ========= # first n_rings tp rings --> "polar coeffs" - e3_blocks_ten_to_pol = xp.zeros((self.n_polar[3][0], self.n_rings[3][0] * self.d1), dtype=float) + e3_blocks_ten_to_pol = np.zeros((self.n_polar[3][0], self.n_rings[3][0] * self.d1), dtype=float) self._e_ten_to_pol["2"] = [ [csr(e1_22_blocks_ten_to_pol), csr(-e1_21_blocks_ten_to_pol), None], @@ -161,7 +161,7 @@ def __init__(self, domain, derham): self._p_ten_to_ten = {} # first n_rings tp rings --> "polar coeffs" - p0_blocks_ten_to_pol = xp.zeros((self.n_polar[0][0], self.n_rings[0][0] * self.n1), dtype=float) + p0_blocks_ten_to_pol = np.zeros((self.n_polar[0][0], self.n_rings[0][0] * self.n1), dtype=float) # !! NOTE: for odd spline degrees and periodic splines the first Greville point sometimes does NOT start at zero!! if domain.p[1] % 2 != 0 and not (abs(derham.Vh_fem["0"].spaces[1].interpolation_grid[0]) < 1e-14): @@ -176,15 +176,15 @@ def __init__(self, domain, derham): self._p_ten_to_pol["0"] = [[csr(p0_blocks_ten_to_pol)]] # first n_rings + 1 tp rings --> "first tp ring" - p0_blocks_ten_to_ten = xp.block([0 * xp.identity(self.n1)] * self.n_rings[0][0] + [xp.identity(self.n1)]) + p0_blocks_ten_to_ten = np.block([0 * np.identity(self.n1)] * self.n_rings[0][0] + [np.identity(self.n1)]) self._p_ten_to_ten["0"] = [[csr(p0_blocks_ten_to_ten)]] # =========== projection extraction operator for discrete 1-forms (Hcurl) ======== # first n_rings tp rings --> "polar coeffs" - p1_11_blocks_ten_to_pol = xp.zeros((self.n_polar[1][0], self.n_rings[1][0] * self.n1), dtype=float) - p1_22_blocks_ten_to_pol = xp.zeros((self.n_polar[1][1], self.n_rings[1][1] * self.d1), dtype=float) + p1_11_blocks_ten_to_pol = np.zeros((self.n_polar[1][0], self.n_rings[1][0] * self.n1), dtype=float) + p1_22_blocks_ten_to_pol = np.zeros((self.n_polar[1][1], self.n_rings[1][1] * self.d1), dtype=float) # !! NOTE: PSYDAC's first integration interval sometimes start at < 0 !! 
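+ # in that case the first integration interval presumably wraps around eta2 = 0,
+ # and the dof columns are shifted by one cell (hence the +1 slice offsets and
+ # np.roll(..., +1) in the branches guarded by this check)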
if derham.Vh_fem["3"].spaces[1].histopolation_grid[0] < -1e-14: @@ -196,8 +196,8 @@ def __init__(self, domain, derham): p1_22_blocks_ten_to_pol[1, (self.d1 + 0 * self.d1 // 3) : (self.d1 + 1 * self.d1 // 3)] = 1.0 p1_22_blocks_ten_to_pol[1, (self.d1 + 1 * self.d1 // 3) : (self.d1 + 2 * self.d1 // 3)] = 1.0 - p1_12_blocks_ten_to_pol = xp.zeros((self.n_polar[1][0], self.n_rings[1][1] * self.d1), dtype=float) - p1_21_blocks_ten_to_pol = xp.zeros((self.n_polar[1][1], self.n_rings[1][0] * self.d1), dtype=float) + p1_12_blocks_ten_to_pol = np.zeros((self.n_polar[1][0], self.n_rings[1][1] * self.d1), dtype=float) + p1_21_blocks_ten_to_pol = np.zeros((self.n_polar[1][1], self.n_rings[1][0] * self.d1), dtype=float) self._p_ten_to_pol["1"] = [ [csr(p1_11_blocks_ten_to_pol), csr(p1_12_blocks_ten_to_pol), None], @@ -206,26 +206,26 @@ def __init__(self, domain, derham): ] # first n_rings + 1 tp rings --> "first tp ring" - p1_11_blocks_ten_to_ten = xp.zeros((self.n1, self.n1), dtype=float) + p1_11_blocks_ten_to_ten = np.zeros((self.n1, self.n1), dtype=float) # !! NOTE: for odd spline degrees and periodic splines the first Greville point sometimes does NOT start at zero!! if domain.p[1] % 2 != 0 and not (abs(derham.Vh_fem["0"].spaces[1].interpolation_grid[0]) < 1e-14): - p1_11_blocks_ten_to_ten[:, 3 * self.n1 // 3 - 1] = -xp.roll(self.xi_1[0], -1) - p1_11_blocks_ten_to_ten[:, 1 * self.n1 // 3 - 1] = -xp.roll(self.xi_1[1], -1) - p1_11_blocks_ten_to_ten[:, 2 * self.n1 // 3 - 1] = -xp.roll(self.xi_1[2], -1) + p1_11_blocks_ten_to_ten[:, 3 * self.n1 // 3 - 1] = -np.roll(self.xi_1[0], -1) + p1_11_blocks_ten_to_ten[:, 1 * self.n1 // 3 - 1] = -np.roll(self.xi_1[1], -1) + p1_11_blocks_ten_to_ten[:, 2 * self.n1 // 3 - 1] = -np.roll(self.xi_1[2], -1) else: p1_11_blocks_ten_to_ten[:, 0 * self.n1 // 3] = -self.xi_1[0] p1_11_blocks_ten_to_ten[:, 1 * self.n1 // 3] = -self.xi_1[1] p1_11_blocks_ten_to_ten[:, 2 * self.n1 // 3] = -self.xi_1[2] - p1_11_blocks_ten_to_ten += xp.identity(self.n1) + p1_11_blocks_ten_to_ten += np.identity(self.n1) - p1_11_blocks_ten_to_ten = xp.block([p1_11_blocks_ten_to_ten, xp.identity(self.n1)]) + p1_11_blocks_ten_to_ten = np.block([p1_11_blocks_ten_to_ten, np.identity(self.n1)]) - p1_22_blocks_ten_to_ten = xp.block([0 * xp.identity(self.d1)] * self.n_rings[1][1] + [xp.identity(self.d1)]) + p1_22_blocks_ten_to_ten = np.block([0 * np.identity(self.d1)] * self.n_rings[1][1] + [np.identity(self.d1)]) - p1_12_blocks_ten_to_ten = xp.zeros((self.d1, (self.n_rings[1][1] + 1) * self.d1), dtype=float) - p1_21_blocks_ten_to_ten = xp.zeros((self.n1, (self.n_rings[1][0] + 1) * self.n1), dtype=float) + p1_12_blocks_ten_to_ten = np.zeros((self.d1, (self.n_rings[1][1] + 1) * self.d1), dtype=float) + p1_21_blocks_ten_to_ten = np.zeros((self.n1, (self.n_rings[1][0] + 1) * self.n1), dtype=float) self._p_ten_to_ten["1"] = [ [csr(p1_11_blocks_ten_to_ten), csr(p1_12_blocks_ten_to_ten), None], @@ -236,7 +236,7 @@ def __init__(self, domain, derham): # ========== projection extraction operator for discrete 1-forms (Hdiv) ========== # first n_rings tp rings --> "polar coeffs" - p3_blocks_ten_to_pol = xp.zeros((self.n_polar[3][0], self.n_rings[3][0] * self.d1), dtype=float) + p3_blocks_ten_to_pol = np.zeros((self.n_polar[3][0], self.n_rings[3][0] * self.d1), dtype=float) self._p_ten_to_pol["2"] = [ [csr(p1_22_blocks_ten_to_pol), csr(p1_21_blocks_ten_to_pol), None], @@ -245,24 +245,24 @@ def __init__(self, domain, derham): ] # first n_rings + 1 tp rings --> "first tp ring" - p3_blocks_ten_to_ten = xp.zeros((self.d1, 
self.d1), dtype=float) + p3_blocks_ten_to_ten = np.zeros((self.d1, self.d1), dtype=float) - a0 = xp.diff(self.xi_1[1], append=self.xi_1[1, 0]) - a1 = xp.diff(self.xi_1[2], append=self.xi_1[2, 0]) + a0 = np.diff(self.xi_1[1], append=self.xi_1[1, 0]) + a1 = np.diff(self.xi_1[2], append=self.xi_1[2, 0]) # !! NOTE: PSYDAC's first integration interval sometimes start at < 0 !! if derham.Vh_fem["3"].spaces[1].histopolation_grid[0] < -1e-14: p3_blocks_ten_to_ten[:, (0 * self.n1 // 3 + 1) : (1 * self.n1 // 3 + 1)] = ( - -xp.roll(a0, +1)[:, None] - xp.roll(a1, +1)[:, None] + -np.roll(a0, +1)[:, None] - np.roll(a1, +1)[:, None] ) - p3_blocks_ten_to_ten[:, (1 * self.n1 // 3 + 1) : (2 * self.n1 // 3 + 1)] = -xp.roll(a1, +1)[:, None] + p3_blocks_ten_to_ten[:, (1 * self.n1 // 3 + 1) : (2 * self.n1 // 3 + 1)] = -np.roll(a1, +1)[:, None] else: p3_blocks_ten_to_ten[:, 0 * self.n1 // 3 : 1 * self.n1 // 3] = -a0[:, None] - a1[:, None] p3_blocks_ten_to_ten[:, 1 * self.n1 // 3 : 2 * self.n1 // 3] = -a1[:, None] - p3_blocks_ten_to_ten += xp.identity(self.d1) + p3_blocks_ten_to_ten += np.identity(self.d1) - p3_blocks_ten_to_ten = xp.block([p3_blocks_ten_to_ten, xp.identity(self.d1)]) + p3_blocks_ten_to_ten = np.block([p3_blocks_ten_to_ten, np.identity(self.d1)]) self._p_ten_to_ten["2"] = [ [csr(p1_22_blocks_ten_to_ten), csr(p1_21_blocks_ten_to_ten), None], @@ -295,24 +295,24 @@ def __init__(self, domain, derham): # ======================= discrete gradient ====================================== # "polar coeffs" to "polar coeffs" - grad_pol_to_pol_1 = xp.zeros((self.n_polar[1][0], self.n_polar[0][0]), dtype=float) - grad_pol_to_pol_2 = xp.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]]) - grad_pol_to_pol_3 = xp.identity(self.n_polar[0][0], dtype=float) + grad_pol_to_pol_1 = np.zeros((self.n_polar[1][0], self.n_polar[0][0]), dtype=float) + grad_pol_to_pol_2 = np.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]]) + grad_pol_to_pol_3 = np.identity(self.n_polar[0][0], dtype=float) self._grad_pol_to_pol = [[csr(grad_pol_to_pol_1)], [csr(grad_pol_to_pol_2)], [csr(grad_pol_to_pol_3)]] # "polar coeffs" to "first tp ring" - grad_pol_to_ten_1 = xp.zeros(((self.n_rings[1][0] + 1) * self.n1, self.n_polar[0][0])) - grad_pol_to_ten_2 = xp.zeros(((self.n_rings[1][1] + 1) * self.d1, self.n_polar[0][0])) - grad_pol_to_ten_3 = xp.zeros(((self.n_rings[0][0] + 1) * self.n1, self.n_polar[0][0])) + grad_pol_to_ten_1 = np.zeros(((self.n_rings[1][0] + 1) * self.n1, self.n_polar[0][0])) + grad_pol_to_ten_2 = np.zeros(((self.n_rings[1][1] + 1) * self.d1, self.n_polar[0][0])) + grad_pol_to_ten_3 = np.zeros(((self.n_rings[0][0] + 1) * self.n1, self.n_polar[0][0])) grad_pol_to_ten_1[-self.n1 :, :] = -self.xi_1.T self._grad_pol_to_ten = [[csr(grad_pol_to_ten_1)], [csr(grad_pol_to_ten_2)], [csr(grad_pol_to_ten_3)]] # eta_3 direction - grad_e3_1 = xp.identity(self.n2, dtype=float) - grad_e3_2 = xp.identity(self.n2, dtype=float) + grad_e3_1 = np.identity(self.n2, dtype=float) + grad_e3_2 = np.identity(self.n2, dtype=float) grad_e3_3 = grad_1d_matrix(derham.spl_kind[2], self.n2) self._grad_e3 = [[csr(grad_e3_1)], [csr(grad_e3_2)], [csr(grad_e3_3)]] @@ -320,14 +320,14 @@ def __init__(self, domain, derham): # =========================== discrete curl ====================================== # "polar coeffs" to "polar coeffs" - curl_pol_to_pol_12 = xp.identity(self.n_polar[1][1], dtype=float) - curl_pol_to_pol_13 = xp.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]]) + curl_pol_to_pol_12 = np.identity(self.n_polar[1][1], dtype=float) + curl_pol_to_pol_13 = 
np.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]]) - curl_pol_to_pol_21 = xp.identity(self.n_polar[1][0], dtype=float) - curl_pol_to_pol_23 = xp.zeros((self.n_polar[2][1], self.n_polar[0][0]), dtype=float) + curl_pol_to_pol_21 = np.identity(self.n_polar[1][0], dtype=float) + curl_pol_to_pol_23 = np.zeros((self.n_polar[2][1], self.n_polar[0][0]), dtype=float) - curl_pol_to_pol_31 = xp.zeros((self.n_polar[3][0], self.n_polar[1][0]), dtype=float) - curl_pol_to_pol_32 = xp.zeros((self.n_polar[3][0], self.n_polar[1][1]), dtype=float) + curl_pol_to_pol_31 = np.zeros((self.n_polar[3][0], self.n_polar[1][0]), dtype=float) + curl_pol_to_pol_32 = np.zeros((self.n_polar[3][0], self.n_polar[1][1]), dtype=float) self._curl_pol_to_pol = [ [None, csr(-curl_pol_to_pol_12), csr(curl_pol_to_pol_13)], @@ -336,14 +336,14 @@ def __init__(self, domain, derham): ] # "polar coeffs" to "first tp ring" - curl_pol_to_ten_12 = xp.zeros(((self.n_rings[2][0] + 1) * self.d1, self.n_polar[1][1])) - curl_pol_to_ten_13 = xp.zeros(((self.n_rings[2][0] + 1) * self.d1, self.n_polar[0][0])) + curl_pol_to_ten_12 = np.zeros(((self.n_rings[2][0] + 1) * self.d1, self.n_polar[1][1])) + curl_pol_to_ten_13 = np.zeros(((self.n_rings[2][0] + 1) * self.d1, self.n_polar[0][0])) - curl_pol_to_ten_21 = xp.zeros(((self.n_rings[2][1] + 1) * self.n1, self.n_polar[1][0])) - curl_pol_to_ten_23 = xp.zeros(((self.n_rings[2][1] + 1) * self.n1, self.n_polar[0][0])) + curl_pol_to_ten_21 = np.zeros(((self.n_rings[2][1] + 1) * self.n1, self.n_polar[1][0])) + curl_pol_to_ten_23 = np.zeros(((self.n_rings[2][1] + 1) * self.n1, self.n_polar[0][0])) - curl_pol_to_ten_31 = xp.zeros(((self.n_rings[3][0] + 1) * self.n1, self.n_polar[1][0])) - curl_pol_to_ten_32 = xp.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[1][1])) + curl_pol_to_ten_31 = np.zeros(((self.n_rings[3][0] + 1) * self.n1, self.n_polar[1][0])) + curl_pol_to_ten_32 = np.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[1][1])) curl_pol_to_ten_23[-self.n1 :, :] = -self.xi_1.T @@ -361,13 +361,13 @@ def __init__(self, domain, derham): # eta_3 direction curl_e3_12 = grad_1d_matrix(derham.spl_kind[2], self.n2) - curl_e3_13 = xp.identity(self.d2) + curl_e3_13 = np.identity(self.d2) curl_e3_21 = grad_1d_matrix(derham.spl_kind[2], self.n2) - curl_e3_23 = xp.identity(self.d2) + curl_e3_23 = np.identity(self.d2) - curl_e3_31 = xp.identity(self.n2) - curl_e3_32 = xp.identity(self.n2) + curl_e3_31 = np.identity(self.n2) + curl_e3_32 = np.identity(self.n2) self._curl_e3 = [ [None, csr(curl_e3_12), csr(curl_e3_13)], @@ -378,16 +378,16 @@ def __init__(self, domain, derham): # =========================== discrete div ====================================== # "polar coeffs" to "polar coeffs" - div_pol_to_pol_1 = xp.zeros((self.n_polar[3][0], self.n_polar[2][0]), dtype=float) - div_pol_to_pol_2 = xp.zeros((self.n_polar[3][0], self.n_polar[2][1]), dtype=float) - div_pol_to_pol_3 = xp.identity(self.n_polar[3][0], dtype=float) + div_pol_to_pol_1 = np.zeros((self.n_polar[3][0], self.n_polar[2][0]), dtype=float) + div_pol_to_pol_2 = np.zeros((self.n_polar[3][0], self.n_polar[2][1]), dtype=float) + div_pol_to_pol_3 = np.identity(self.n_polar[3][0], dtype=float) self._div_pol_to_pol = [[csr(div_pol_to_pol_1), csr(div_pol_to_pol_2), csr(div_pol_to_pol_3)]] # "polar coeffs" to "first tp ring" - div_pol_to_ten_1 = xp.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[2][0])) - div_pol_to_ten_2 = xp.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[2][1])) - div_pol_to_ten_3 = 
xp.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[3][0])) + div_pol_to_ten_1 = np.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[2][0])) + div_pol_to_ten_2 = np.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[2][1])) + div_pol_to_ten_3 = np.zeros(((self.n_rings[3][0] + 1) * self.d1, self.n_polar[3][0])) for l in range(2): for j in range(self.d1, 2 * self.d1): @@ -398,8 +398,8 @@ def __init__(self, domain, derham): self._div_pol_to_ten = [[csr(div_pol_to_ten_1), csr(div_pol_to_ten_2), csr(div_pol_to_ten_3)]] # eta_3 direction - div_e3_1 = xp.identity(self.d2, dtype=float) - div_e3_2 = xp.identity(self.d2, dtype=float) + div_e3_1 = np.identity(self.d2, dtype=float) + div_e3_2 = np.identity(self.d2, dtype=float) div_e3_3 = grad_1d_matrix(derham.spl_kind[2], self.n2) self._div_e3 = [[csr(div_e3_1), csr(div_e3_2), csr(div_e3_3)]] @@ -539,13 +539,13 @@ def __init__(self, n0, n1): # =========== extraction operators for discrete 0-forms ================== # extraction operator for basis functions - self.E0_11 = spa.csr_matrix(xp.ones((1, n1), dtype=float)) + self.E0_11 = spa.csr_matrix(np.ones((1, n1), dtype=float)) self.E0_22 = spa.identity((n0 - 1) * n1, format="csr") self.E0 = spa.bmat([[self.E0_11, None], [None, self.E0_22]], format="csr") # global projection extraction operator for interpolation points - self.P0_11 = xp.zeros((1, n1), dtype=float) + self.P0_11 = np.zeros((1, n1), dtype=float) self.P0_11[0, 0] = 1.0 @@ -598,7 +598,7 @@ def __init__(self, n0, n1): # ========= discrete polar gradient matrix =============================== # radial dofs (DN) - G11 = xp.zeros(((d0 - 0) * n1, 1), dtype=float) + G11 = np.zeros(((d0 - 0) * n1, 1), dtype=float) G11[:n1, 0] = -1.0 G12 = spa.kron(grad_1d_1[:, 1:], spa.identity(n1)) @@ -606,7 +606,7 @@ def __init__(self, n0, n1): self.G1 = spa.bmat([[G11, G12]], format="csr") # angular dofs (ND) - G21 = xp.zeros(((n0 - 1) * d1, 1), dtype=float) + G21 = np.zeros(((n0 - 1) * d1, 1), dtype=float) G22 = spa.kron(spa.identity(n0 - 1), grad_1d_2, format="csr") self.G2 = spa.bmat([[G21, G22]], format="csr") @@ -619,13 +619,13 @@ def __init__(self, n0, n1): # 2D vector curl (NN --> ND DN) # angular dofs (ND) - VC11 = xp.zeros(((n0 - 1) * d1, 1), dtype=float) + VC11 = np.zeros(((n0 - 1) * d1, 1), dtype=float) VC12 = spa.kron(spa.identity(n0 - 1), grad_1d_2, format="csr") self.VC1 = spa.bmat([[VC11, VC12]], format="csr") # radial dofs (DN) - VC21 = xp.zeros(((d0 - 0) * n1, 1), dtype=float) + VC21 = np.zeros(((d0 - 0) * n1, 1), dtype=float) VC21[:n1, 0] = 1.0 VC22 = -spa.kron(grad_1d_1[:, 1:], spa.identity(n1)) @@ -687,26 +687,26 @@ def __init__(self, cx, cy): self.Nbase2 = (d0 - 1) * d1 # size of control triangle - self.tau = xp.array( + self.tau = np.array( [ (-2 * (cx[1] - self.x0)).max(), - ((cx[1] - self.x0) - xp.sqrt(3) * (cy[1] - self.y0)).max(), - ((cx[1] - self.x0) + xp.sqrt(3) * (cy[1] - self.y0)).max(), - ], + ((cx[1] - self.x0) - np.sqrt(3) * (cy[1] - self.y0)).max(), + ((cx[1] - self.x0) + np.sqrt(3) * (cy[1] - self.y0)).max(), + ] ).max() - self.Xi_0 = xp.zeros((3, n1), dtype=float) - self.Xi_1 = xp.zeros((3, n1), dtype=float) + self.Xi_0 = np.zeros((3, n1), dtype=float) + self.Xi_1 = np.zeros((3, n1), dtype=float) # barycentric coordinates self.Xi_0[:, :] = 1 / 3 self.Xi_1[0, :] = 1 / 3 + 2 / (3 * self.tau) * (cx[1] - self.x0) self.Xi_1[1, :] = ( - 1 / 3 - 1 / (3 * self.tau) * (cx[1] - self.x0) + xp.sqrt(3) / (3 * self.tau) * (cy[1] - self.y0) + 1 / 3 - 1 / (3 * self.tau) * (cx[1] - self.x0) + np.sqrt(3) / (3 * self.tau) * 
(cy[1] - self.y0) ) self.Xi_1[2, :] = ( - 1 / 3 - 1 / (3 * self.tau) * (cx[1] - self.x0) - xp.sqrt(3) / (3 * self.tau) * (cy[1] - self.y0) + 1 / 3 - 1 / (3 * self.tau) * (cx[1] - self.x0) - np.sqrt(3) / (3 * self.tau) * (cy[1] - self.y0) ) # remove small values @@ -714,13 +714,13 @@ def __init__(self, cx, cy): # =========== extraction operators for discrete 0-forms ================== # extraction operator for basis functions - self.E0_11 = spa.csr_matrix(xp.hstack((self.Xi_0, self.Xi_1))) + self.E0_11 = spa.csr_matrix(np.hstack((self.Xi_0, self.Xi_1))) self.E0_22 = spa.identity((n0 - 2) * n1, format="csr") self.E0 = spa.bmat([[self.E0_11, None], [None, self.E0_22]], format="csr") # global projection extraction operator for interpolation points - self.P0_11 = xp.zeros((3, 2 * n1), dtype=float) + self.P0_11 = np.zeros((3, 2 * n1), dtype=float) self.P0_11[0, n1 + 0 * n1 // 3] = 1.0 self.P0_11[1, n1 + 1 * n1 // 3] = 1.0 @@ -737,8 +737,8 @@ def __init__(self, cx, cy): self.E1C_12 = spa.identity((d0 - 1) * n1) self.E1C_34 = spa.identity((n0 - 2) * d1) - self.E1C_21 = xp.zeros((2, 1 * n1), dtype=float) - self.E1C_23 = xp.zeros((2, 2 * d1), dtype=float) + self.E1C_21 = np.zeros((2, 1 * n1), dtype=float) + self.E1C_23 = np.zeros((2, 2 * d1), dtype=float) # 1st component for s in range(2): @@ -760,22 +760,22 @@ def __init__(self, cx, cy): # extraction operator for interpolation/histopolation in global projector # 1st component - self.P1C_11 = xp.zeros((n1, n1), dtype=float) + self.P1C_11 = np.zeros((n1, n1), dtype=float) self.P1C_12 = spa.identity(n1) self.P1C_23 = spa.identity((d0 - 2) * n1) self.P1C_11[:, 0 * n1 // 3] = -self.Xi_1[0] self.P1C_11[:, 1 * n1 // 3] = -self.Xi_1[1] self.P1C_11[:, 2 * n1 // 3] = -self.Xi_1[2] - self.P1C_11 += xp.identity(n1) + self.P1C_11 += np.identity(n1) # 2nd component - self.P1C_34 = xp.zeros((2, 2 * d1), dtype=float) + self.P1C_34 = np.zeros((2, 2 * d1), dtype=float) self.P1C_45 = spa.identity((n0 - 2) * d1) - self.P1C_34[0, (d1 + 0 * d1 // 3) : (d1 + 1 * d1 // 3)] = xp.ones(d1 // 3, dtype=float) - self.P1C_34[1, (d1 + 0 * d1 // 3) : (d1 + 1 * d1 // 3)] = xp.ones(d1 // 3, dtype=float) - self.P1C_34[1, (d1 + 1 * d1 // 3) : (d1 + 2 * d1 // 3)] = xp.ones(d1 // 3, dtype=float) + self.P1C_34[0, (d1 + 0 * d1 // 3) : (d1 + 1 * d1 // 3)] = np.ones(d1 // 3, dtype=float) + self.P1C_34[1, (d1 + 0 * d1 // 3) : (d1 + 1 * d1 // 3)] = np.ones(d1 // 3, dtype=float) + self.P1C_34[1, (d1 + 1 * d1 // 3) : (d1 + 2 * d1 // 3)] = np.ones(d1 // 3, dtype=float) # combined first and second component self.P1C = spa.bmat( @@ -790,8 +790,8 @@ def __init__(self, cx, cy): # ========================================================================= # ========= extraction operators for discrete 1-forms (H_div) ============= - self.E1D_11 = xp.zeros((2, 2 * d1), dtype=float) - self.E1D_13 = xp.zeros((2, 1 * n1), dtype=float) + self.E1D_11 = np.zeros((2, 2 * d1), dtype=float) + self.E1D_13 = np.zeros((2, 1 * n1), dtype=float) self.E1D_22 = spa.identity((n0 - 2) * d1) self.E1D_34 = spa.identity((d0 - 1) * n1) @@ -834,13 +834,13 @@ def __init__(self, cx, cy): # ========================================================================= # =========== extraction operators for discrete 2-forms =================== - self.E2_1 = xp.zeros(((d0 - 1) * d1, d1), dtype=float) + self.E2_1 = np.zeros(((d0 - 1) * d1, d1), dtype=float) self.E2_2 = spa.identity((d0 - 1) * d1) self.E2 = spa.bmat([[self.E2_1, self.E2_2]], format="csr") # extraction operator for histopolation in global projector - self.P2_11 = 
xp.zeros((d1, d1), dtype=float) + self.P2_11 = np.zeros((d1, d1), dtype=float) self.P2_12 = spa.identity(d1) self.P2_23 = spa.identity((d0 - 2) * d1) @@ -853,7 +853,7 @@ def __init__(self, cx, cy): # block B self.P2_11[i, 1 * n1 // 3 : 2 * n1 // 3] = -(self.Xi_1[2, (i + 1) % n1] - self.Xi_1[2, i]) - self.P2_11 += xp.identity(d1) + self.P2_11 += np.identity(d1) self.P2 = spa.bmat([[self.P2_11, self.P2_12, None], [None, None, self.P2_23]], format="csr") # ========================================================================= @@ -864,14 +864,14 @@ def __init__(self, cx, cy): # ========================================================================= # ========= discrete polar gradient matrix ================================ - self.G1_1 = xp.zeros(((d0 - 1) * n1, 3), dtype=float) + self.G1_1 = np.zeros(((d0 - 1) * n1, 3), dtype=float) self.G1_1[:n1, :] = -self.Xi_1.T self.G1_2 = spa.kron(grad_1d_1[1:, 2:], spa.identity(n1)) self.G1 = spa.bmat([[self.G1_1, self.G1_2]], format="csr") - self.G2_11 = xp.zeros((2, 3), dtype=float) + self.G2_11 = np.zeros((2, 3), dtype=float) self.G2_11[0, 0] = -1.0 self.G2_11[0, 1] = 1.0 @@ -888,7 +888,7 @@ def __init__(self, cx, cy): # ========= discrete polar curl matrix =================================== # 2D vector curl - self.VC1_11 = xp.zeros((2, 3), dtype=float) + self.VC1_11 = np.zeros((2, 3), dtype=float) self.VC1_11[0, 0] = -1.0 self.VC1_11[0, 1] = 1.0 @@ -900,7 +900,7 @@ def __init__(self, cx, cy): self.VC1 = spa.bmat([[self.VC1_11, None], [None, self.VC1_22]], format="csr") - self.VC2_11 = xp.zeros(((d0 - 1) * n1, 3), dtype=float) + self.VC2_11 = np.zeros(((d0 - 1) * n1, 3), dtype=float) self.VC2_11[:n1, :] = -self.Xi_1.T self.VC2_22 = spa.kron(grad_1d_1[1:, 2:], spa.identity(n1)) @@ -912,7 +912,7 @@ def __init__(self, cx, cy): # 2D scalar curl self.SC1 = -spa.kron(spa.identity(d0 - 1), grad_1d_2) - self.SC2_1 = xp.zeros(((d0 - 1) * d1, 2), dtype=float) + self.SC2_1 = np.zeros(((d0 - 1) * d1, 2), dtype=float) for s in range(2): for j in range(d1): @@ -926,7 +926,7 @@ def __init__(self, cx, cy): # ========================================================================= # ========= discrete polar div matrix ===================================== - self.D1_1 = xp.zeros(((d0 - 1) * d1, 2), dtype=float) + self.D1_1 = np.zeros(((d0 - 1) * d1, 2), dtype=float) for s in range(2): for j in range(d1): @@ -965,25 +965,24 @@ def __init__(self, tensor_space, cx, cy): self.Nbase3_pol = (d0 - 1) * d1 # size of control triangle - self.tau = xp.array( - [(-2 * cx[1]).max(), (cx[1] - xp.sqrt(3) * cy[1]).max(), (cx[1] + xp.sqrt(3) * cy[1]).max()], + self.tau = np.array( + [(-2 * cx[1]).max(), (cx[1] - np.sqrt(3) * cy[1]).max(), (cx[1] + np.sqrt(3) * cy[1]).max()] ).max() - self.Xi_0 = xp.zeros((3, n1), dtype=float) - self.Xi_1 = xp.zeros((3, n1), dtype=float) + self.Xi_0 = np.zeros((3, n1), dtype=float) + self.Xi_1 = np.zeros((3, n1), dtype=float) # barycentric coordinates self.Xi_0[:, :] = 1 / 3 self.Xi_1[0, :] = 1 / 3 + 2 / (3 * self.tau) * cx[1, :, 0] - self.Xi_1[1, :] = 1 / 3 - 1 / (3 * self.tau) * cx[1, :, 0] + xp.sqrt(3) / (3 * self.tau) * cy[1, :, 0] - self.Xi_1[2, :] = 1 / 3 - 1 / (3 * self.tau) * cx[1, :, 0] - xp.sqrt(3) / (3 * self.tau) * cy[1, :, 0] + self.Xi_1[1, :] = 1 / 3 - 1 / (3 * self.tau) * cx[1, :, 0] + np.sqrt(3) / (3 * self.tau) * cy[1, :, 0] + self.Xi_1[2, :] = 1 / 3 - 1 / (3 * self.tau) * cx[1, :, 0] - np.sqrt(3) / (3 * self.tau) * cy[1, :, 0] # =========== extraction operators for discrete 0-forms ================== # extraction operator for 
basis functions self.E0_pol = spa.bmat( - [[xp.hstack((self.Xi_0, self.Xi_1)), None], [None, spa.identity((n0 - 2) * n1)]], - format="csr", + [[np.hstack((self.Xi_0, self.Xi_1)), None], [None, spa.identity((n0 - 2) * n1)]], format="csr" ) self.E0 = spa.kron(self.E0_pol, spa.identity(n2), format="csr") @@ -1006,7 +1005,7 @@ def __init__(self, tensor_space, cx, cy): for j in range(n1): self.E1_1_pol[(d0 - 1) * n1 + s, j] = self.Xi_1[s + 1, j] - self.Xi_0[s + 1, j] - self.E1_1_pol[: (d0 - 1) * n1, n1:] = xp.identity((d0 - 1) * n1) + self.E1_1_pol[: (d0 - 1) * n1, n1:] = np.identity((d0 - 1) * n1) self.E1_1_pol = self.E1_1_pol.tocsr() # 2nd component @@ -1015,7 +1014,7 @@ def __init__(self, tensor_space, cx, cy): self.E1_2_pol[(d0 - 1) * n1 + s, j] = 0.0 self.E1_2_pol[(d0 - 1) * n1 + s, n1 + j] = self.Xi_1[s + 1, (j + 1) % n1] - self.Xi_1[s + 1, j] - self.E1_2_pol[((d0 - 1) * n1 + 2) :, 2 * d1 :] = xp.identity((n0 - 2) * d1) + self.E1_2_pol[((d0 - 1) * n1 + 2) :, 2 * d1 :] = np.identity((n0 - 2) * d1) self.E1_2_pol = self.E1_2_pol.tocsr() # 3rd component @@ -1044,9 +1043,9 @@ def __init__(self, tensor_space, cx, cy): self.P1_1_pol = self.P1_1_pol.tocsr() # 2nd component - self.P1_2_pol[0, (n1 + 0 * n1 // 3) : (n1 + 1 * n1 // 3)] = xp.ones((1, n1 // 3), dtype=float) - self.P1_2_pol[1, (n1 + 0 * n1 // 3) : (n1 + 1 * n1 // 3)] = xp.ones((1, n1 // 3), dtype=float) - self.P1_2_pol[1, (n1 + 1 * n1 // 3) : (n1 + 2 * n1 // 3)] = xp.ones((1, n1 // 3), dtype=float) + self.P1_2_pol[0, (n1 + 0 * n1 // 3) : (n1 + 1 * n1 // 3)] = np.ones((1, n1 // 3), dtype=float) + self.P1_2_pol[1, (n1 + 0 * n1 // 3) : (n1 + 1 * n1 // 3)] = np.ones((1, n1 // 3), dtype=float) + self.P1_2_pol[1, (n1 + 1 * n1 // 3) : (n1 + 2 * n1 // 3)] = np.ones((1, n1 // 3), dtype=float) self.P1_2_pol[2:, 2 * n1 :] = spa.identity((n0 - 2) * d1) self.P1_2_pol = self.P1_2_pol.tocsr() @@ -1074,7 +1073,7 @@ def __init__(self, tensor_space, cx, cy): self.E2_1_pol[s, j] = 0.0 self.E2_1_pol[s, n1 + j] = self.Xi_1[s + 1, (j + 1) % n1] - self.Xi_1[s + 1, j] - self.E2_1_pol[2 : (2 + (n0 - 2) * d1), 2 * n1 :] = xp.identity((n0 - 2) * d1) + self.E2_1_pol[2 : (2 + (n0 - 2) * d1), 2 * n1 :] = np.identity((n0 - 2) * d1) self.E2_1_pol = self.E2_1_pol.tocsr() # 2nd component @@ -1082,11 +1081,11 @@ def __init__(self, tensor_space, cx, cy): for j in range(n1): self.E2_2_pol[s, j] = -(self.Xi_1[s + 1, j] - self.Xi_0[s + 1, j]) - self.E2_2_pol[(2 + (n0 - 2) * d1) :, 1 * n1 :] = xp.identity((d0 - 1) * n1) + self.E2_2_pol[(2 + (n0 - 2) * d1) :, 1 * n1 :] = np.identity((d0 - 1) * n1) self.E2_2_pol = self.E2_2_pol.tocsr() # 3rd component - self.E2_3_pol[:, 1 * d1 :] = xp.identity((d0 - 1) * d1) + self.E2_3_pol[:, 1 * d1 :] = np.identity((d0 - 1) * d1) self.E2_3_pol = self.E2_3_pol.tocsr() # combined first and second component @@ -1216,7 +1215,7 @@ def __init__(self, tensor_space, cx, cy): [ [None, -spa.kron(spa.identity((n0 - 2) * d1 + 2), grad_1d_3)], [spa.kron(spa.identity((d0 - 1) * n1), grad_1d_3), None], - ], + ] ) # total polar curl diff --git a/src/struphy/polar/linear_operators.py b/src/struphy/polar/linear_operators.py index 0e37c7e76..019b7aae0 100644 --- a/src/struphy/polar/linear_operators.py +++ b/src/struphy/polar/linear_operators.py @@ -1,4 +1,3 @@ -import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector, BlockVectorSpace from psydac.linalg.stencil import StencilVector, StencilVectorSpace @@ -7,6 +6,7 @@ from struphy.feec.linear_operators import LinOpWithTransp from struphy.linear_algebra.linalg_kron 
import kron_matvec_2d from struphy.polar.basic import PolarDerhamSpace, PolarVector +from struphy.utils.arrays import xp as np class PolarExtractionOperator(LinOpWithTransp): @@ -334,14 +334,7 @@ class PolarLinearOperator(LinOpWithTransp): """ def __init__( - self, - V, - W, - tp_operator=None, - blocks_pol_to_ten=None, - blocks_pol_to_pol=None, - blocks_e3=None, - transposed=False, + self, V, W, tp_operator=None, blocks_pol_to_ten=None, blocks_pol_to_pol=None, blocks_e3=None, transposed=False ): assert isinstance(V, PolarDerhamSpace) assert isinstance(W, PolarDerhamSpace) @@ -675,7 +668,7 @@ def dot_inner_tp_rings(blocks_e1_e2, blocks_e3, v, out): # loop over codomain components for m, (row_e1_e2, row_e3) in enumerate(zip(blocks_e1_e2, blocks_e3)): - res = xp.zeros((n_rows[m], n3_out[m]), dtype=float) + res = np.zeros((n_rows[m], n3_out[m]), dtype=float) # loop over domain components for n, (block_e1_e2, block_e3) in enumerate(zip(row_e1_e2, row_e3)): @@ -684,7 +677,7 @@ def dot_inner_tp_rings(blocks_e1_e2, blocks_e3, v, out): e1, e2, e3 = in_ends[n] if block_e1_e2 is not None: - tmp = xp.zeros((n_rings_in[n], n2, n3_in[n]), dtype=float) + tmp = np.zeros((n_rings_in[n], n2, n3_in[n]), dtype=float) tmp[:, s2 : e2 + 1, s3 : e3 + 1] = in_vec[n][0 : n_rings_in[n], s2 : e2 + 1, s3 : e3 + 1] res += kron_matvec_2d([block_e1_e2, block_e3], tmp.reshape(n_rings_in[n] * n2, n3_in[n])) @@ -792,7 +785,7 @@ def dot_parts_of_polar(blocks_e1_e2, blocks_e3, v, out): # loop over codomain components for m, (row_e1_e2, row_e3) in enumerate(zip(blocks_e1_e2, blocks_e3)): - res = xp.zeros((n_rings_out[m], n2, n3_out[m]), dtype=float) + res = np.zeros((n_rings_out[m], n2, n3_out[m]), dtype=float) # loop over domain components for n, (block_e1_e2, block_e3) in enumerate(zip(row_e1_e2, row_e3)): @@ -801,7 +794,7 @@ def dot_parts_of_polar(blocks_e1_e2, blocks_e3, v, out): if in_starts[n][0] == 0: s1, s2, s3 = in_starts[n] e1, e2, e3 = in_ends[n] - tmp = xp.zeros((n2, n3_in[n]), dtype=float) + tmp = np.zeros((n2, n3_in[n]), dtype=float) tmp[s2 : e2 + 1, s3 : e3 + 1] = in_tp[n][n_rings_in[n], s2 : e2 + 1, s3 : e3 + 1] res += kron_matvec_2d([block_e1_e2, block_e3], tmp).reshape(n_rings_out[m], n2, n3_out[m]) else: diff --git a/src/struphy/polar/tests/test_legacy_polar_splines.py b/src/struphy/polar/tests/test_legacy_polar_splines.py index be2bfb654..b9665ae0b 100644 --- a/src/struphy/polar/tests/test_legacy_polar_splines.py +++ b/src/struphy/polar/tests/test_legacy_polar_splines.py @@ -7,12 +7,12 @@ def test_polar_splines_2D(plot=False): sys.path.append("..") - import cunumpy as xp import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space from struphy.geometry import domains + from struphy.utils.arrays import xp as np # parameters # number of elements (number of elements in angular direction must be a multiple of 3) @@ -42,8 +42,8 @@ def test_polar_splines_2D(plot=False): fig.set_figheight(10) fig.set_figwidth(10) - el_b_1 = xp.linspace(0.0, 1.0, Nel[0] + 1) - el_b_2 = xp.linspace(0.0, 1.0, Nel[1] + 1) + el_b_1 = np.linspace(0.0, 1.0, Nel[0] + 1) + el_b_2 = np.linspace(0.0, 1.0, Nel[1] + 1) grid_x = domain(el_b_1, el_b_2, 0.0, squeeze_out=True)[0] grid_y = domain(el_b_1, el_b_2, 0.0, squeeze_out=True)[1] @@ -108,7 +108,7 @@ def test_polar_splines_2D(plot=False): ) # plot three new polar splines in V0 - etaplot = [xp.linspace(0.0, 1.0, 200), xp.linspace(0.0, 1.0, 200)] + etaplot = [np.linspace(0.0, 1.0, 200), 
np.linspace(0.0, 1.0, 200)] xplot = [ domain(etaplot[0], etaplot[1], 0.0, squeeze_out=True)[0], domain(etaplot[0], etaplot[1], 0.0, squeeze_out=True)[1], @@ -123,9 +123,9 @@ def test_polar_splines_2D(plot=False): ax3 = fig.add_subplot(133, projection="3d") # coeffs in polar basis - c0_pol1 = xp.zeros(space_2d.E0.shape[0], dtype=float) - c0_pol2 = xp.zeros(space_2d.E0.shape[0], dtype=float) - c0_pol3 = xp.zeros(space_2d.E0.shape[0], dtype=float) + c0_pol1 = np.zeros(space_2d.E0.shape[0], dtype=float) + c0_pol2 = np.zeros(space_2d.E0.shape[0], dtype=float) + c0_pol3 = np.zeros(space_2d.E0.shape[0], dtype=float) c0_pol1[0] = 1.0 c0_pol2[1] = 1.0 @@ -134,7 +134,7 @@ def test_polar_splines_2D(plot=False): ax1.plot_surface( xplot[0], xplot[1], - space_2d.evaluate_NN(etaplot[0], etaplot[1], xp.array([0.0]), c0_pol1, "V0")[:, :, 0], + space_2d.evaluate_NN(etaplot[0], etaplot[1], np.array([0.0]), c0_pol1, "V0")[:, :, 0], cmap="jet", ) ax1.set_xlabel("R [m]", labelpad=5) @@ -144,7 +144,7 @@ def test_polar_splines_2D(plot=False): ax2.plot_surface( xplot[0], xplot[1], - space_2d.evaluate_NN(etaplot[0], etaplot[1], xp.array([0.0]), c0_pol2, "V0")[:, :, 0], + space_2d.evaluate_NN(etaplot[0], etaplot[1], np.array([0.0]), c0_pol2, "V0")[:, :, 0], cmap="jet", ) ax2.set_xlabel("R [m]", labelpad=5) @@ -154,7 +154,7 @@ def test_polar_splines_2D(plot=False): ax3.plot_surface( xplot[0], xplot[1], - space_2d.evaluate_NN(etaplot[0], etaplot[1], xp.array([0.0]), c0_pol3, "V0")[:, :, 0], + space_2d.evaluate_NN(etaplot[0], etaplot[1], np.array([0.0]), c0_pol3, "V0")[:, :, 0], cmap="jet", ) ax3.set_xlabel("R [m]", labelpad=5) diff --git a/src/struphy/polar/tests/test_polar.py b/src/struphy/polar/tests/test_polar.py index ac0113c4f..b2b5c8326 100644 --- a/src/struphy/polar/tests/test_polar.py +++ b/src/struphy/polar/tests/test_polar.py @@ -167,7 +167,6 @@ def test_spaces(Nel, p, spl_kind): @pytest.mark.parametrize("p", [[3, 2, 2]]) @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) def test_extraction_ops_and_derivatives(Nel, p, spl_kind): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space @@ -177,6 +176,7 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): from struphy.polar.basic import PolarDerhamSpace, PolarVector from struphy.polar.extraction_operators import PolarExtractionBlocksC1 from struphy.polar.linear_operators import PolarExtractionOperator, PolarLinearOperator + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -222,11 +222,11 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): b2_pol.tp = b2_tp p3_pol.tp = p3_tp - xp.random.seed(1607) - f0_pol.pol = [xp.random.rand(f0_pol.pol[0].shape[0], f0_pol.pol[0].shape[1])] - e1_pol.pol = [xp.random.rand(e1_pol.pol[n].shape[0], e1_pol.pol[n].shape[1]) for n in range(3)] - b2_pol.pol = [xp.random.rand(b2_pol.pol[n].shape[0], b2_pol.pol[n].shape[1]) for n in range(3)] - p3_pol.pol = [xp.random.rand(p3_pol.pol[0].shape[0], p3_pol.pol[0].shape[1])] + np.random.seed(1607) + f0_pol.pol = [np.random.rand(f0_pol.pol[0].shape[0], f0_pol.pol[0].shape[1])] + e1_pol.pol = [np.random.rand(e1_pol.pol[n].shape[0], e1_pol.pol[n].shape[1]) for n in range(3)] + b2_pol.pol = [np.random.rand(b2_pol.pol[n].shape[0], b2_pol.pol[n].shape[1]) for n in range(3)] + p3_pol.pol = [np.random.rand(p3_pol.pol[0].shape[0], p3_pol.pol[0].shape[1])] f0_pol_leg = f0_pol.toarray(True) e1_pol_leg = 
e1_pol.toarray(True) @@ -243,10 +243,10 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): r2_pol = derham.extraction_ops["2"].dot(b2_tp) r3_pol = derham.extraction_ops["3"].dot(p3_tp) - assert xp.allclose(r0_pol.toarray(True), space.E0.dot(f0_tp_leg)) - assert xp.allclose(r1_pol.toarray(True), space.E1.dot(e1_tp_leg)) - assert xp.allclose(r2_pol.toarray(True), space.E2.dot(b2_tp_leg)) - assert xp.allclose(r3_pol.toarray(True), space.E3.dot(p3_tp_leg)) + assert np.allclose(r0_pol.toarray(True), space.E0.dot(f0_tp_leg)) + assert np.allclose(r1_pol.toarray(True), space.E1.dot(e1_tp_leg)) + assert np.allclose(r2_pol.toarray(True), space.E2.dot(b2_tp_leg)) + assert np.allclose(r3_pol.toarray(True), space.E3.dot(p3_tp_leg)) # test transposed extraction operators E0T = derham.extraction_ops["0"].transpose() @@ -277,9 +277,9 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): r2_pol = derham.curl.dot(e1_pol) r3_pol = derham.div.dot(b2_pol) - assert xp.allclose(r1_pol.toarray(True), space.G.dot(f0_pol_leg)) - assert xp.allclose(r2_pol.toarray(True), space.C.dot(e1_pol_leg)) - assert xp.allclose(r3_pol.toarray(True), space.D.dot(b2_pol_leg)) + assert np.allclose(r1_pol.toarray(True), space.G.dot(f0_pol_leg)) + assert np.allclose(r2_pol.toarray(True), space.C.dot(e1_pol_leg)) + assert np.allclose(r3_pol.toarray(True), space.D.dot(b2_pol_leg)) # test transposed derivatives GT = derham.grad.transpose() @@ -290,9 +290,9 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): r1_pol = CT.dot(b2_pol) r2_pol = DT.dot(p3_pol) - assert xp.allclose(r0_pol.toarray(True), space.G.T.dot(e1_pol_leg)) - assert xp.allclose(r1_pol.toarray(True), space.C.T.dot(b2_pol_leg)) - assert xp.allclose(r2_pol.toarray(True), space.D.T.dot(p3_pol_leg)) + assert np.allclose(r0_pol.toarray(True), space.G.T.dot(e1_pol_leg)) + assert np.allclose(r1_pol.toarray(True), space.C.T.dot(b2_pol_leg)) + assert np.allclose(r2_pol.toarray(True), space.D.T.dot(p3_pol_leg)) if rank == 0: print("------------- Test passed ---------------------------") @@ -302,12 +302,12 @@ def test_extraction_ops_and_derivatives(Nel, p, spl_kind): @pytest.mark.parametrize("p", [[4, 3, 2]]) @pytest.mark.parametrize("spl_kind", [[False, True, True], [False, True, False]]) def test_projectors(Nel, p, spl_kind): - import cunumpy as xp from psydac.ddm.mpi import mpi as MPI from struphy.eigenvalue_solvers.spline_space import Spline_space_1d, Tensor_spline_space from struphy.feec.psydac_derham import Derham from struphy.geometry.domains import IGAPolarCylinder + from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -338,7 +338,7 @@ def test_projectors(Nel, p, spl_kind): # function to project on physical domain def fun_scalar(x, y, z): - return xp.sin(2 * xp.pi * (x)) * xp.cos(2 * xp.pi * y) * xp.sin(2 * xp.pi * z) + return np.sin(2 * np.pi * (x)) * np.cos(2 * np.pi * y) * np.sin(2 * np.pi * z) fun_vector = [fun_scalar, fun_scalar, fun_scalar] @@ -369,7 +369,7 @@ def fun3(e1, e2, e3): r0_pol_leg = space.projectors.pi_0(fun0) - assert xp.allclose(r0_pol.toarray(True), r0_pol_leg) + assert np.allclose(r0_pol.toarray(True), r0_pol_leg) if rank == 0: print("Test passed for PI_0 polar projector") @@ -385,7 +385,7 @@ def fun3(e1, e2, e3): r1_pol_leg = space.projectors.pi_1(fun1, with_subs=False) - assert xp.allclose(r1_pol.toarray(True), r1_pol_leg) + assert np.allclose(r1_pol.toarray(True), r1_pol_leg) if rank == 0: print("Test passed for PI_1 polar projector") @@ -401,7 +401,7 @@ def fun3(e1, e2, e3): 
r2_pol_leg = space.projectors.pi_2(fun2, with_subs=False) - assert xp.allclose(r2_pol.toarray(True), r2_pol_leg) + assert np.allclose(r2_pol.toarray(True), r2_pol_leg) if rank == 0: print("Test passed for PI_2 polar projector") @@ -417,7 +417,7 @@ def fun3(e1, e2, e3): r3_pol_leg = space.projectors.pi_3(fun3, with_subs=False) - assert xp.allclose(r3_pol.toarray(True), r3_pol_leg) + assert np.allclose(r3_pol.toarray(True), r3_pol_leg) if rank == 0: print("Test passed for PI_3 polar projector") diff --git a/src/struphy/post_processing/likwid/plot_likwidproject.py b/src/struphy/post_processing/likwid/plot_likwidproject.py index f4c3bb442..33b0426af 100644 --- a/src/struphy/post_processing/likwid/plot_likwidproject.py +++ b/src/struphy/post_processing/likwid/plot_likwidproject.py @@ -7,7 +7,6 @@ import re import sys -import cunumpy as xp import matplotlib.pyplot as plt import pandas as pd import plotly.express as px @@ -17,6 +16,7 @@ import struphy.post_processing.likwid.likwid_parser as lp import struphy.post_processing.likwid.maxplotlylib as mply import struphy.post_processing.likwid.roofline_plotter as rp +from struphy.utils.arrays import xp as np def clean_string(string_in): @@ -196,16 +196,16 @@ def plot_roofline( fig.update_xaxes( type="log", # Ensure the x-axis is logarithmic - range=[xp.log10(xmin), xp.log10(xmax)], + range=[np.log10(xmin), np.log10(xmax)], title="Operational intensity (FLOP/Byte)", tickvals=xtick_values, # Set where ticks appear ticktext=[str(t) for t in xtick_values], - # ticktext=[f'$10^{{{int(xp.log10(t))}}}$' for t in xtick_values] # Set tick labels + # ticktext=[f'$10^{{{int(np.log10(t))}}}$' for t in xtick_values] # Set tick labels ) fig.update_yaxes( type="log", # Ensure the x-axis is logarithmic - range=[xp.log10(ymin), xp.log10(ymax)], + range=[np.log10(ymin), np.log10(ymax)], title="Performance [GFLOP/s]", tickvals=ytick_values, # Set where ticks appear ticktext=[str(t) for t in ytick_values], @@ -387,7 +387,7 @@ def plot_speedup( fig.update_layout( # xaxis_title='Job name', - xaxis_title="MPI tasks (#)", + xaxis_title=f"MPI tasks (#)", yaxis_title=re.sub(r"\[.*?\]", "[relative]", metric2), showlegend=True, xaxis_tickformat=".1f", @@ -818,7 +818,7 @@ def load_projects(data_paths, procs_per_clone="any"): ) if (procs_per_clone != "any") and (procs_per_clone != project.procs_per_clone): print( - f"Incorrect number of procs_per_clone: {project.procs_per_clone =} {procs_per_clone =}", + f"Incorrect number of procs_per_clone: {project.procs_per_clone = } {procs_per_clone = }", ) continue project.read_project() diff --git a/src/struphy/post_processing/likwid/plot_time_traces.py b/src/struphy/post_processing/likwid/plot_time_traces.py index 7451833cb..4f3f4eeb8 100644 --- a/src/struphy/post_processing/likwid/plot_time_traces.py +++ b/src/struphy/post_processing/likwid/plot_time_traces.py @@ -2,13 +2,12 @@ import pickle import re -import cunumpy as xp import matplotlib.pyplot as plt -import plotly.graph_objects as go import plotly.io as pio # pio.kaleido.scope.mathjax = None import struphy.post_processing.likwid.maxplotlylib as mply +from struphy.utils.arrays import xp as np def glob_to_regex(pat: str) -> str: @@ -17,31 +16,19 @@ def glob_to_regex(pat: str) -> str: return "^" + esc.replace(r"\*", ".*").replace(r"\?", ".") + "$" -# def plot_region(region_name, groups_include=["*"], groups_skip=[]): -# # skips first -# for pat in groups_skip: -# rx = glob_to_regex(pat) -# if re.fullmatch(rx, region_name): -# return False - -# # includes next -# for pat in 
groups_include: -# rx = glob_to_regex(pat) -# if re.fullmatch(rx, region_name): -# return True - -# return False - - def plot_region(region_name, groups_include=["*"], groups_skip=[]): - from fnmatch import fnmatch - - for pattern in groups_skip: - if fnmatch(region_name, pattern): + # skips first + for pat in groups_skip: + rx = glob_to_regex(pat) + if re.fullmatch(rx, region_name): return False - for pattern in groups_include: - if fnmatch(region_name, pattern): + + # includes next + for pat in groups_include: + rx = glob_to_regex(pat) + if re.fullmatch(rx, region_name): return True + return False @@ -67,7 +54,7 @@ def plot_time_vs_duration( plt.figure(figsize=(10, 6)) for path in paths: - print(f"{path =}") + print(f"{path = }") with open(path, "rb") as file: profiling_data = pickle.load(file) @@ -134,9 +121,9 @@ def plot_avg_duration_bar_chart( # Compute statistics per region regions = sorted(region_durations.keys()) - avg_durations = [xp.mean(region_durations[r]) for r in regions] - min_durations = [xp.min(region_durations[r]) for r in regions] - max_durations = [xp.max(region_durations[r]) for r in regions] + avg_durations = [np.mean(region_durations[r]) for r in regions] + min_durations = [np.min(region_durations[r]) for r in regions] + max_durations = [np.max(region_durations[r]) for r in regions] yerr = [ [avg - min_ for avg, min_ in zip(avg_durations, min_durations)], [max_ - avg for avg, max_ in zip(avg_durations, max_durations)], @@ -144,7 +131,7 @@ def plot_avg_duration_bar_chart( # Plot bar chart with error bars (min-max spans) plt.figure(figsize=(12, 6)) - x = xp.arange(len(regions)) + x = np.arange(len(regions)) plt.bar(x, avg_durations, yerr=yerr, capsize=5, color="skyblue", edgecolor="k") plt.yscale("log") plt.xticks(x, regions, rotation=45, ha="right") @@ -159,6 +146,21 @@ def plot_avg_duration_bar_chart( print(f"Saved average duration bar chart to: {figure_path}") +import plotly.graph_objects as go + + +def plot_region(region_name, groups_include=["*"], groups_skip=[]): + from fnmatch import fnmatch + + for pattern in groups_skip: + if fnmatch(region_name, pattern): + return False + for pattern in groups_include: + if fnmatch(region_name, pattern): + return True + return False + + def plot_gantt_chart_plotly( path: str, output_path: str, @@ -173,7 +175,7 @@ def plot_gantt_chart_plotly( region_start_times = {} for rank_data in profiling_data["rank_data"].values(): for region_name, info in rank_data.items(): - first_start_time = xp.min(info["start_times"]) + first_start_time = np.min(info["start_times"]) if region_name not in region_start_times or first_start_time < region_start_times[region_name]: region_start_times[region_name] = first_start_time @@ -202,7 +204,7 @@ def plot_gantt_chart_plotly( Start=start_times[i], Finish=end_times[i], Duration=durations[i], - ), + ) ) if len(bars) == 0: @@ -224,7 +226,7 @@ def plot_gantt_chart_plotly( name=bar["Rank"], marker_color=rank_color_map[bar["Rank"]], hovertemplate=f"Rank: {bar['Rank']}
<br>Start: {bar['Start']:.3f}s<br>
Duration: {bar['Duration']:.3f}s", - ), + ) ) fig.update_layout( @@ -289,7 +291,7 @@ def plot_gantt_chart( region_start_times = {} for rank_data in profiling_data["rank_data"].values(): for region_name, info in rank_data.items(): - first_start_time = xp.min(info["start_times"]) + first_start_time = np.min(info["start_times"]) if region_name not in region_start_times or first_start_time < region_start_times[region_name]: region_start_times[region_name] = first_start_time diff --git a/src/struphy/post_processing/likwid/roofline_plotter.py b/src/struphy/post_processing/likwid/roofline_plotter.py index 3a4808bdc..6a49f9c34 100644 --- a/src/struphy/post_processing/likwid/roofline_plotter.py +++ b/src/struphy/post_processing/likwid/roofline_plotter.py @@ -1,10 +1,11 @@ import glob import pickle -import cunumpy as xp import pandas as pd import yaml +from struphy.utils.arrays import xp as np + def sort_by_num_threads(bm): sorted_arrays = {} @@ -142,14 +143,14 @@ def add_plot_diagonal( bandwidth_GBps, label="", ymax=1e4, - operational_intensity_FLOPpMB=xp.arange(0, 1000, 1), + operational_intensity_FLOPpMB=np.arange(0, 1000, 1), ): max_performance_GFLOP = operational_intensity_FLOPpMB * bandwidth_GBps (line,) = mfig.axs.plot(operational_intensity_FLOPpMB, max_performance_GFLOP) # Specify the y-value where you want to place the text specific_y = ymax # Interpolate to find the corresponding x-value - specific_x = xp.interp( + specific_x = np.interp( specific_y, max_performance_GFLOP, operational_intensity_FLOPpMB, @@ -209,10 +210,10 @@ def get_average_val( xvec.append(x) yvec.append(y) # print('xvec', xvec, 'yvec', yvec) - xvec = xp.array(xvec) - yvec = xp.array(yvec) + xvec = np.array(xvec) + yvec = np.array(yvec) # print('xvec', xvec, 'yvec', yvec) - return xp.average(xvec), xp.average(yvec), xp.std(xvec), xp.std(yvec) + return np.average(xvec), np.average(yvec), np.std(xvec), np.std(yvec) def get_maximum(path, df_index=-1, metric="DP [MFLOP/s] STAT", column_name="Sum"): diff --git a/src/struphy/post_processing/orbits/orbits_tools.py b/src/struphy/post_processing/orbits/orbits_tools.py index 97eee89af..eb72ebdfb 100644 --- a/src/struphy/post_processing/orbits/orbits_tools.py +++ b/src/struphy/post_processing/orbits/orbits_tools.py @@ -1,12 +1,13 @@ import os import shutil -import cunumpy as xp import h5py import yaml from tqdm import tqdm +from struphy.io.setup import setup_domain_and_equil from struphy.post_processing.orbits.orbits_kernels import calculate_guiding_center_from_6d +from struphy.utils.arrays import xp as np def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): @@ -61,7 +62,7 @@ def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): if file.endswith(".npy") ] pproc_nt = len(npy_files_list) - n_markers = xp.load(os.path.join(path_orbits, npy_files_list[0])).shape[0] + n_markers = np.load(os.path.join(path_orbits, npy_files_list[0])).shape[0] # re-ordering npy_files npy_files_list = sorted(npy_files_list) @@ -76,10 +77,10 @@ def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): os.mkdir(path_gc) # temporary marker array - temp = xp.empty((n_markers, 7), dtype=float) - etas = xp.empty((n_markers, 3), dtype=float) - B_cart = xp.empty((n_markers, 3), dtype=float) - lost_particles_mask = xp.empty(n_markers, dtype=bool) + temp = np.empty((n_markers, 7), dtype=float) + etas = np.empty((n_markers, 3), dtype=float) + B_cart = np.empty((n_markers, 3), dtype=float) + lost_particles_mask = np.empty(n_markers, dtype=bool) 
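+ # temp holds one .npy marker snapshot per time step (ids are cycled to the last
+ # column via np.roll below); etas receives the logical positions from the inverse
+ # map, and B_cart the Cartesian magnetic field evaluated at those positions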
print("Evaluation of guiding center for " + str(species)) @@ -94,13 +95,13 @@ def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): file_txt = os.path.join(path_gc, npy_files_list[n][:-4] + ".txt") # call .npy file - temp[:, :] = xp.load(os.path.join(path_orbits, npy_files_list[n])) + temp[:, :] = np.load(os.path.join(path_orbits, npy_files_list[n])) # move ids to last column and save - temp = xp.roll(temp, -1, axis=1) + temp = np.roll(temp, -1, axis=1) # sorting out lost particles - lost_particles_mask = xp.all(temp[:, :-1] == 0, axis=1) + lost_particles_mask = np.all(temp[:, :-1] == 0, axis=1) # domain inverse map etas[~lost_particles_mask, :] = domain.inverse_map( @@ -110,7 +111,7 @@ def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): # eval cartesian magnetic filed at marker positions B_cart[~lost_particles_mask, :] = equil.b_cart( - *xp.concatenate( + *np.concatenate( ( etas[:, 0][:, None], etas[:, 1][:, None], @@ -123,10 +124,10 @@ def post_process_orbit_guiding_center(path_in, path_kinetics_species, species): calculate_guiding_center_from_6d(temp, B_cart) # move ids to first column and save - temp = xp.roll(temp, 1, axis=1) + temp = np.roll(temp, 1, axis=1) - xp.save(file_npy, temp) - xp.savetxt(file_txt, temp[:, :4], fmt="%12.6f", delimiter=", ") + np.save(file_npy, temp) + np.savetxt(file_txt, temp[:, :4], fmt="%12.6f", delimiter=", ") def post_process_orbit_classification(path_kinetics_species, species): @@ -168,16 +169,16 @@ def post_process_orbit_classification(path_kinetics_species, species): if file.endswith(".npy") ] pproc_nt = len(npy_files_list) - n_markers = xp.load(os.path.join(path_gc, npy_files_list[0])).shape[0] + n_markers = np.load(os.path.join(path_gc, npy_files_list[0])).shape[0] # re-ordering npy_files npy_files_list = sorted(npy_files_list) # temporary marker array - temp = xp.empty((n_markers, 8), dtype=float) - v_parallel = xp.empty(n_markers, dtype=float) - trapped_particle_mask = xp.empty(n_markers, dtype=bool) - lost_particle_mask = xp.empty(n_markers, dtype=bool) + temp = np.empty((n_markers, 8), dtype=float) + v_parallel = np.empty(n_markers, dtype=float) + trapped_particle_mask = np.empty(n_markers, dtype=bool) + lost_particle_mask = np.empty(n_markers, dtype=bool) print("Classifying guiding center orbits for " + str(species)) @@ -188,16 +189,16 @@ def post_process_orbit_classification(path_kinetics_species, species): # load .npy files file_npy = os.path.join(path_gc, npy_files_list[n]) - temp[:, :-1] = xp.load(file_npy) + temp[:, :-1] = np.load(file_npy) # initial time step if n == 0: v_init = temp[:, 4] - xp.save(file_npy, temp) + np.save(file_npy, temp) continue # synchronizing with former time step - temp[:, -1] = xp.load( + temp[:, -1] = np.load( os.path.join( path_gc, npy_files_list[n - 1], @@ -205,10 +206,10 @@ def post_process_orbit_classification(path_kinetics_species, species): )[:, -1] # call parallel velocity data from .npy file - v_parallel = xp.load(os.path.join(path_gc, npy_files_list[n]))[:, 4] + v_parallel = np.load(os.path.join(path_gc, npy_files_list[n]))[:, 4] # sorting out lost particles - lost_particle_mask = xp.all(temp[:, 1:-1] == 0, axis=1) + lost_particle_mask = np.all(temp[:, 1:-1] == 0, axis=1) # check reverse of parallel velocity trapped_particle_mask[:] = False @@ -221,4 +222,4 @@ def post_process_orbit_classification(path_kinetics_species, species): # assign "-1" at the last index of lost particles temp[lost_particle_mask, -1] = -1 - xp.save(file_npy, temp) + 
np.save(file_npy, temp) diff --git a/src/struphy/post_processing/post_processing_tools.py b/src/struphy/post_processing/post_processing_tools.py index e0759bb63..28919c6d3 100644 --- a/src/struphy/post_processing/post_processing_tools.py +++ b/src/struphy/post_processing/post_processing_tools.py @@ -1,122 +1,20 @@ import os -import pickle import shutil -import cunumpy as xp import h5py +import matplotlib.pyplot as plt import yaml from tqdm import tqdm -from struphy.feec.psydac_derham import SplineFunction -from struphy.fields_background import equils -from struphy.fields_background.base import FluidEquilibrium -from struphy.geometry import domains -from struphy.geometry.base import Domain -from struphy.io.options import BaseUnits, EnvironmentOptions, Time -from struphy.io.setup import import_parameters_py +from struphy.feec.psydac_derham import Derham +from struphy.io.setup import setup_domain_and_equil from struphy.kinetic_background import maxwellians -from struphy.kinetic_background.base import KineticBackground -from struphy.models.base import StruphyModel, setup_derham -from struphy.models.species import ParticleSpecies -from struphy.models.variables import PICVariable -from struphy.topology.grids import TensorProductGrid - - -class ParamsIn: - """Holds the input parameters of a Struphy simulation as attributes.""" - - def __init__( - self, - env: EnvironmentOptions = None, - base_units: BaseUnits = None, - time_opts: Time = None, - domain=None, - equil=None, - grid: TensorProductGrid = None, - derham_opts=None, - model: StruphyModel = None, - ): - self.env = env - self.units = base_units - self.time_opts = time_opts - self.domain = domain - self.equil = equil - self.grid = grid - self.derham_opts = derham_opts - self.model = model - - -def get_params_of_run(path: str) -> ParamsIn: - """Retrieve parameters of finished Struphy run. - - Parameters - ---------- - path : str - Absolute path of simulation output folder. - """ - - print(f"\nReading in paramters from {path} ... 
") - - params_path = os.path.join(path, "parameters.py") - bin_path = os.path.join(path, "env.bin") - - if os.path.exists(params_path): - params_in = import_parameters_py(params_path) - env = params_in.env - base_units = params_in.base_units - time_opts = params_in.time_opts - domain = params_in.domain - equil = params_in.equil - grid = params_in.grid - derham_opts = params_in.derham_opts - model = params_in.model - - elif os.path.exists(bin_path): - with open(os.path.join(path, "env.bin"), "rb") as f: - env = pickle.load(f) - with open(os.path.join(path, "base_units.bin"), "rb") as f: - base_units = pickle.load(f) - with open(os.path.join(path, "time_opts.bin"), "rb") as f: - time_opts = pickle.load(f) - with open(os.path.join(path, "domain.bin"), "rb") as f: - # WORKAROUND: cannot pickle pyccelized classes at the moment - domain_dct = pickle.load(f) - domain: Domain = getattr(domains, domain_dct["name"])(**domain_dct["params"]) - with open(os.path.join(path, "equil.bin"), "rb") as f: - # WORKAROUND: cannot pickle pyccelized classes at the moment - equil_dct = pickle.load(f) - if equil_dct: - equil: FluidEquilibrium = getattr(equils, equil_dct["name"])(**equil_dct["params"]) - else: - equil = None - with open(os.path.join(path, "grid.bin"), "rb") as f: - grid = pickle.load(f) - with open(os.path.join(path, "derham_opts.bin"), "rb") as f: - derham_opts = pickle.load(f) - with open(os.path.join(path, "model_class.bin"), "rb") as f: - model_class: StruphyModel = pickle.load(f) - model = model_class() - - else: - raise FileNotFoundError(f"Neither of the paths {params_path} or {bin_path} exists.") - - print("done.") - - return ParamsIn( - env=env, - base_units=base_units, - time_opts=time_opts, - domain=domain, - equil=equil, - grid=grid, - derham_opts=derham_opts, - model=model, - ) +from struphy.models import fluid, hybrid, kinetic, toy +from struphy.utils.arrays import xp as np def create_femfields( path: str, - params_in: ParamsIn, *, step: int = 1, ): @@ -127,9 +25,6 @@ def create_femfields( path : str Absolute path of simulation output folder. - params_in : ParamsIn - Simulation parameters. - step : int Whether to create FEM fields at every time step (step=1, default), every second time step (step=2), etc. @@ -138,50 +33,50 @@ def create_femfields( fields : dict Nested dictionary holding :class:`~struphy.feec.psydac_derham.SplineFunction`: fields[t][name] contains the Field with the name "name" in the hdf5 file at time t. - t_grid : xp.ndarray - Time grid. + space_ids : dict + The space IDs of the fields (H1, Hcurl, Hdiv, L2 or H1vec). space_ids[name] contains the space ID of the field with the name "name". + + model : str + From which model in struphy/models the data has been obtained. 
""" - with open(os.path.join(path, "meta.yml"), "r") as f: - meta = yaml.load(f, Loader=yaml.FullLoader) - nproc = meta["MPI processes"] + # get model name and # of MPI processes from meta.txt file + with open(os.path.join(path, "meta.txt"), "r") as f: + lines = f.readlines() + + model = lines[3].split()[-1] + nproc = lines[4].split()[-1] - derham = setup_derham( - params_in.grid, - params_in.derham_opts, - comm=None, - domain=params_in.domain, + # create Derham sequence from grid parameters + with open(os.path.join(path, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) + + derham = Derham( + params["grid"]["Nel"], + params["grid"]["p"], + params["grid"]["spl_kind"], ) # get fields names, space IDs and time grid from 0-th rank hdf5 file file = h5py.File(os.path.join(path, "data/", "data_proc0.hdf5"), "r") + space_ids = {} - print("\nReading hdf5 data of following species:") - for species, dset in file["feec"].items(): - space_ids[species] = {} - print(f"{species}:") - for var, ddset in dset.items(): - space_ids[species][var] = ddset.attrs["space_id"] - print(f" {var}:", ddset) + + for field_name, dset in file["feec"].items(): + space_ids[field_name] = dset.attrs["space_id"] t_grid = file["time/value"][::step].copy() + file.close() # create one FemField for each snapshot fields = {} for t in t_grid: fields[t] = {} - for species, vars in space_ids.items(): - fields[t][species] = {} - for var, id in vars.items(): - fields[t][species][var] = derham.create_spline_function( - var, - id, - verbose=False, - ) + for field_name, ID in space_ids.items(): + fields[t][field_name] = derham.create_spline_function(field_name, ID) # get hdf5 data - print("") for rank in range(int(nproc)): # open hdf5 file file = h5py.File( @@ -193,66 +88,67 @@ def create_femfields( "r", ) - for species, dset in file["feec"].items(): - for var, ddset in tqdm(dset.items()): - # get global start indices, end indices and pads - gl_s = ddset.attrs["starts"] - gl_e = ddset.attrs["ends"] - pads = ddset.attrs["pads"] - - assert gl_s.shape == (3,) or gl_s.shape == (3, 3) - assert gl_e.shape == (3,) or gl_e.shape == (3, 3) - assert pads.shape == (3,) or pads.shape == (3, 3) - - # loop over time - for n, t in enumerate(t_grid): - # scalar field - if gl_s.shape == (3,): - s1, s2, s3 = gl_s - e1, e2, e3 = gl_e - p1, p2, p3 = pads - - data = ddset[n * step, p1:-p1, p2:-p2, p3:-p3].copy() - - fields[t][species][var].vector[ + for field_name, dset in tqdm(file["feec"].items()): + # get global start indices, end indices and pads + gl_s = dset.attrs["starts"] + gl_e = dset.attrs["ends"] + pads = dset.attrs["pads"] + + assert gl_s.shape == (3,) or gl_s.shape == (3, 3) + assert gl_e.shape == (3,) or gl_e.shape == (3, 3) + assert pads.shape == (3,) or pads.shape == (3, 3) + + # loop over time + for n, t in enumerate(t_grid): + # scalar field + if gl_s.shape == (3,): + s1, s2, s3 = gl_s + e1, e2, e3 = gl_e + p1, p2, p3 = pads + + data = dset[n * step, p1:-p1, p2:-p2, p3:-p3].copy() + + fields[t][field_name].vector[ + s1 : e1 + 1, + s2 : e2 + 1, + s3 : e3 + 1, + ] = data + # update after each data addition, can be made more efficient + fields[t][field_name].vector.update_ghost_regions() + + # vector-valued field + else: + for comp in range(3): + s1, s2, s3 = gl_s[comp] + e1, e2, e3 = gl_e[comp] + p1, p2, p3 = pads[comp] + + data = dset[str(comp + 1)][ + n * step, + p1:-p1, + p2:-p2, + p3:-p3, + ].copy() + + fields[t][field_name].vector[comp][ s1 : e1 + 1, s2 : e2 + 1, s3 : e3 + 1, ] = data - # update after each data 
addition, can be made more efficient - fields[t][species][var].vector.update_ghost_regions() + # update after each data addition, can be made more efficient + fields[t][field_name].vector.update_ghost_regions() - # vector-valued field - else: - for comp in range(3): - s1, s2, s3 = gl_s[comp] - e1, e2, e3 = gl_e[comp] - p1, p2, p3 = pads[comp] - - data = ddset[str(comp + 1)][ - n * step, - p1:-p1, - p2:-p2, - p3:-p3, - ].copy() - - fields[t][species][var].vector[comp][ - s1 : e1 + 1, - s2 : e2 + 1, - s3 : e3 + 1, - ] = data - # update after each data addition, can be made more efficient - fields[t][species][var].vector.update_ghost_regions() file.close() print("Creation of Struphy Fields done.") - return fields, t_grid + return fields, space_ids, model def eval_femfields( - params_in: ParamsIn, + path: str, fields: dict, + space_ids: dict, *, celldivide: list = [1, 1, 1], physical: bool = False, @@ -261,12 +157,15 @@ def eval_femfields( Parameters ---------- - params_in : ParamsIn - Simulation parameters. + path : str + Absolute path of simulation output folder. fields : dict Obtained from struphy.diagnostics.post_processing.create_femfields. + space_ids : dict + Obtained from struphy.diagnostics.post_processing.create_femfields. + celldivide : list of ints Grid refinement in each eta direction. @@ -276,7 +175,7 @@ def eval_femfields( Returns ------- point_data : dict - Nested dictionary holding values of FemFields on the grid as list of 3d xp.arrays: + Nested dictionary holding values of FemFields on the grid as list of 3d np.arrays: point_data[name][t] contains the values of the field with name "name" in fields[t].keys() at time t. If physical is True, physical components of fields are saved. @@ -289,17 +188,22 @@ def eval_femfields( Mapped (physical) grids obtained by domain(*grids_log). 
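+
+    Examples
+    --------
+    A minimal sketch (path is illustrative; ``fields`` and ``space_ids`` are
+    the outputs of ``create_femfields``)::
+
+        fields, space_ids, _ = create_femfields("/path/to/sim_1")
+        point_data, grids_log, grids_phy = eval_femfields(
+            "/path/to/sim_1",
+            fields,
+            space_ids,
+            celldivide=[2, 2, 2],
+            physical=True,
+        )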
""" - # get domain - domain = params_in.domain + assert isinstance(fields, dict) + assert isinstance(space_ids, dict) + + # domain object according to parameter file and grids + with open(os.path.join(path, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) + + domain = setup_domain_and_equil(params)[0] # create logical and physical grids - assert isinstance(fields, dict) assert isinstance(celldivide, list) assert len(celldivide) == 3 - Nel = params_in.grid.Nel + Nel = params["grid"]["Nel"] - grids_log = [xp.linspace(0.0, 1.0, Nel_i * n_i + 1) for Nel_i, n_i in zip(Nel, celldivide)] + grids_log = [np.linspace(0.0, 1.0, Nel_i * n_i + 1) for Nel_i, n_i in zip(Nel, celldivide)] grids_phy = [ domain(*grids_log)[0], domain(*grids_log)[1], @@ -308,86 +212,86 @@ def eval_femfields( # evaluate fields at evaluation grid and push-forward point_data = {} - for species, vars in fields[list(fields.keys())[0]].items(): - point_data[species] = {} - for name, field in vars.items(): - point_data[species][name] = {} - print("\nEvaluating fields ...") - for t in tqdm(fields): - for species, vars in fields[t].items(): - for name, field in vars.items(): - assert isinstance(field, SplineFunction) - space_id = field.space_id + # one dict for each field + for name in space_ids: + point_data[name] = {} - # field evaluation - temp_val = field(*grids_log) + # time loop + print("Evaluating fields ...") + for t in tqdm(fields): + # field loop + for name, field in fields[t].items(): + # space ID + space_id = space_ids[name] + + # field evaluation + temp_val = field(*grids_log) + + point_data[name][t] = [] + + # scalar spaces + if isinstance(temp_val, np.ndarray): + if physical: + # push-forward + if space_id == "H1": + point_data[name][t].append( + domain.push( + temp_val, + *grids_log, + kind="0", + ), + ) + elif space_id == "L2": + point_data[name][t].append( + domain.push( + temp_val, + *grids_log, + kind="3", + ), + ) - point_data[species][name][t] = [] + else: + point_data[name][t].append(temp_val) - # scalar spaces - if isinstance(temp_val, xp.ndarray): + # vector-valued spaces + else: + for j in range(3): if physical: # push-forward - if space_id == "H1": - point_data[species][name][t].append( + if space_id == "Hcurl": + point_data[name][t].append( + domain.push( + temp_val, + *grids_log, + kind="1", + )[j], + ) + elif space_id == "Hdiv": + point_data[name][t].append( domain.push( temp_val, *grids_log, - kind="0", - ), + kind="2", + )[j], ) - elif space_id == "L2": - point_data[species][name][t].append( + elif space_id == "H1vec": + point_data[name][t].append( domain.push( temp_val, *grids_log, - kind="3", - ), + kind="v", + )[j], ) else: - point_data[species][name][t].append(temp_val) - - # vector-valued spaces - else: - for j in range(3): - if physical: - # push-forward - if space_id == "Hcurl": - point_data[species][name][t].append( - domain.push( - temp_val, - *grids_log, - kind="1", - )[j], - ) - elif space_id == "Hdiv": - point_data[species][name][t].append( - domain.push( - temp_val, - *grids_log, - kind="2", - )[j], - ) - elif space_id == "H1vec": - point_data[species][name][t].append( - domain.push( - temp_val, - *grids_log, - kind="v", - )[j], - ) - - else: - point_data[species][name][t].append(temp_val[j]) + point_data[name][t].append(temp_val[j]) return point_data, grids_log, grids_phy def create_vtk( path: str, - t_grid: xp.ndarray, grids_phy: list, point_data: dict, *, @@ -400,9 +304,6 @@ def create_vtk( path : str Absolute path of where to store the .vts files. 
Will then be in path/vtk/step_.vts. - t_grid : xp.ndarray - Time grid. - grids_phy : 3-list Mapped (physical) grids obtained from struphy.diagnostics.post_processing.eval_femfields. @@ -415,52 +316,48 @@ def create_vtk( from pyevtk.hl import gridToVTK - for species, vars in point_data.items(): - species_path = os.path.join(path, species, "vtk" + physical * "_phy") - try: - os.mkdir(species_path) - except: - shutil.rmtree(species_path) - os.mkdir(species_path) + # directory for vtk files + path_vtk = os.path.join(path, "vtk" + physical * "_phy") + + try: + os.mkdir(path_vtk) + except: + shutil.rmtree(path_vtk) + os.mkdir(path_vtk) + + # field names + names = list(point_data.keys()) # time loop - nt = len(t_grid) - 1 - log_nt = int(xp.log10(nt)) + 1 + tgrid = list(point_data[names[0]].keys()) + + nt = len(tgrid) - 1 + log_nt = int(np.log10(nt)) + 1 - print(f"\nCreating vtk in {path} ...") - for n, t in enumerate(tqdm(t_grid)): + print("Creating vtk ...") + for n, t in enumerate(tqdm(tgrid)): point_data_n = {} - for species, vars in point_data.items(): - species_path = os.path.join(path, species, "vtk" + physical * "_phy") - point_data_n[species] = {} - for name, data in vars.items(): - points_list = data[t] + for name in names: + points_list = point_data[name][t] - # scalar - if len(points_list) == 1: - point_data_n[species][name] = points_list[0] + # scalar + if len(points_list) == 1: + point_data_n[name] = points_list[0] - # vectorpoint_data[name] - else: - for j in range(3): - point_data_n[species][name + f"_{j + 1}"] = points_list[j] + # vector + else: + for j in range(3): + point_data_n[name + f"_{j + 1}"] = points_list[j] - gridToVTK( - os.path.join(species_path, "step_{0:0{1}d}".format(n, log_nt)), - *grids_phy, - pointData=point_data_n[species], - ) + gridToVTK( + os.path.join(path_vtk, "step_{0:0{1}d}".format(n, log_nt)), + *grids_phy, + pointData=point_data_n, + ) -def post_process_markers( - path_in: str, - path_out: str, - species: str, - domain: Domain, - kind: str = "Particles6D", - step: int = 1, -): +def post_process_markers(path_in, path_out, species, kind, step=1): """Computes the Cartesian (x, y, z) coordinates of saved markers during a simulation and writes them to a .npy files and to .txt files. Also saves the weights. @@ -512,19 +409,24 @@ def post_process_markers( species : str Name of the species for which the post processing should be performed. - domain : Domain - Domain object. - kind : str Name of the kinetic kind (Particles6D, Particles5D or Particles3D). step : int, optional Whether to do post-processing at every time step (step=1, default), every second time step (step=2), etc. 
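+
+    Examples
+    --------
+    A minimal sketch (paths and species name are illustrative)::
+
+        post_process_markers(
+            "/path/to/sim_1",
+            "/path/to/sim_1/post_processing",
+            "ions",
+            "Particles6D",
+            step=2,
+        )
+
+    Depending on ``kind``, the orbit files are written to ``orbits/`` (6D, SPH
+    and 3D kinds) or ``guiding_center/`` (5D kinds) inside ``path_out``.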
""" + # get # of MPI processes from meta.txt file - with open(os.path.join(path_in, "meta.yml"), "r") as f: - meta = yaml.load(f, Loader=yaml.FullLoader) - nproc = meta["MPI processes"] + with open(os.path.join(path_in, "meta.txt"), "r") as f: + lines = f.readlines() + + nproc = lines[4].split()[-1] + + with open(os.path.join(path_in, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) + + # create domain for calculating markers' physical coordinates + domain = setup_domain_and_equil(params)[0] # open hdf5 files and get names and number of saved markers of kinetic species files = [ @@ -542,16 +444,17 @@ def post_process_markers( # get number of time steps and markers nt, n_markers, n_cols = files[0]["kinetic/" + species + "/markers"].shape - log_nt = int(xp.log10(int(((nt - 1) / step)))) + 1 + log_nt = int(np.log10(int(((nt - 1) / step)))) + 1 # directory for .txt files and marker index which will be saved - path_orbits = os.path.join(path_out, "orbits") - if "5D" in kind: + path_orbits = os.path.join(path_out, "guiding_center") save_index = list(range(0, 6)) + [10] + [-1] elif "6D" in kind or "SPH" in kind: + path_orbits = os.path.join(path_out, "orbits") save_index = list(range(0, 7)) + [-1] else: + path_orbits = os.path.join(path_out, "orbits") save_index = list(range(0, 4)) + [-1] try: @@ -561,15 +464,15 @@ def post_process_markers( os.mkdir(path_orbits) # temporary array - temp = xp.empty((n_markers, len(save_index)), order="C") - lost_particles_mask = xp.empty(n_markers, dtype=bool) + temp = np.empty((n_markers, len(save_index)), order="C") + lost_particles_mask = np.empty(n_markers, dtype=bool) print(f"Evaluation of {n_markers} marker orbits for {species}") # loop over time grid for n in tqdm(range(int((nt - 1) / step) + 1)): # clear buffer - temp[:, :] = 0.0 + temp[:, :] = 0 # create text file for this time step and this species file_npy = os.path.join( @@ -589,42 +492,35 @@ def post_process_markers( # sorting out lost particles ids = temp[:, -1].astype("int") - ids_lost_particles = xp.setdiff1d(xp.arange(n_markers), ids) - ids_removed_particles = xp.nonzero(temp[:, 0] == -1.0)[0] - ids_lost_particles = xp.array(list(set(ids_lost_particles) | set(ids_removed_particles)), dtype=int) + ids_lost_particles = np.setdiff1d(np.arange(n_markers), ids) lost_particles_mask[:] = False lost_particles_mask[ids_lost_particles] = True if len(ids_lost_particles) > 0: # lost markers are saved as [0, ..., 0, ids] temp[lost_particles_mask, -1] = ids_lost_particles - ids = xp.unique(xp.append(ids, ids_lost_particles)) + ids = np.unique(np.append(ids, ids_lost_particles)) - assert xp.all(sorted(ids) == xp.arange(n_markers)) + assert np.all(sorted(ids) == np.arange(n_markers)) # compute physical positions (x, y, z) - pos_phys = domain(xp.array(temp[~lost_particles_mask, :3]), change_out_order=True) - temp[~lost_particles_mask, :3] = pos_phys + temp[~lost_particles_mask, :3] = domain( + np.array(temp[~lost_particles_mask, :3]), + change_out_order=True, + ) + + # move ids to first column and save + temp = np.roll(temp, 1, axis=1) - # save numpy - xp.save(file_npy, temp) - # move ids to first column and save txt - temp = xp.roll(temp, 1, axis=1) - xp.savetxt(file_txt, temp[:, (0, 1, 2, 3, -1)], fmt="%12.6f", delimiter=", ") + np.save(file_npy, temp) + np.savetxt(file_txt, temp[:, (0, 1, 2, 3, -1)], fmt="%12.6f", delimiter=", ") # close hdf5 files for file in files: file.close() -def post_process_f( - path_in, - params_in: ParamsIn, - path_out, - species, - step=1, - 
compute_bckgr=False, -): +def post_process_f(path_in, path_out, species, step=1, compute_bckgr=False): """Computes and saves distribution functions of saved binning data during a simulation. Parameters @@ -632,9 +528,6 @@ def post_process_f( path_in : str Absolute path of simulation output folder. - params_in : ParamsIn - Simulation parameters. - path_out : str Absolute path of where to store the .txt files. Will be in path_out/orbits. @@ -645,15 +538,21 @@ def post_process_f( Whether to do post-processing at every time step (step=1, default), every second time step (step=2), etc. compute_bckgr : bool - Whether to compute the kinetic background values and add them to the binning data. + Whether to compute the kinetic background values and add them to the binning data. This is used if non-standard weights are binned. """ - # get # of MPI processes from meta file - with open(os.path.join(path_in, "meta.yml"), "r") as f: - meta = yaml.load(f, Loader=yaml.FullLoader) - nproc = meta["MPI processes"] - # open hdf5 files and get names and number of saved markers of kinetic species + # get model name and # of MPI processes from meta.txt file + with open(os.path.join(path_in, "meta.txt"), "r") as f: + lines = f.readlines() + + nproc = lines[4].split()[-1] + + # load parameters + with open(os.path.join(path_in, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) + + # open hdf5 files files = [ h5py.File( os.path.join( @@ -692,7 +591,7 @@ def post_process_f( path_slice, "grid_" + slice_names[n_gr] + ".npy", ) - xp.save(grid_path, grid[:]) + np.save(grid_path, grid[:]) # compute distribution function for slice_name in tqdm(files[0]["kinetic/" + species + "/f"]): @@ -713,31 +612,27 @@ def post_process_f( data_df += files[rank]["kinetic/" + species + "/df/" + slice_name][::step] # save distribution functions - xp.save(os.path.join(path_slice, "f_binned.npy"), data) - xp.save(os.path.join(path_slice, "delta_f_binned.npy"), data_df) + np.save(os.path.join(path_slice, "f_binned.npy"), data) + np.save(os.path.join(path_slice, "delta_f_binned.npy"), data_df) if compute_bckgr: - # bckgr_params = params["kinetic"][species]["background"] - - # f_bckgr = None - # for fi, maxw_params in bckgr_params.items(): - # if fi[-2] == "_": - # fi_type = fi[:-2] - # else: - # fi_type = fi - - # if f_bckgr is None: - # f_bckgr = getattr(maxwellians, fi_type)( - # maxw_params=maxw_params, - # ) - # else: - # f_bckgr = f_bckgr + getattr(maxwellians, fi_type)( - # maxw_params=maxw_params, - # ) - - spec: ParticleSpecies = getattr(params_in.model, species) - var: PICVariable = spec.var - f_bckgr: KineticBackground = var.backgrounds + bckgr_params = params["kinetic"][species]["background"] + + f_bckgr = None + for fi, maxw_params in bckgr_params.items(): + if fi[-2] == "_": + fi_type = fi[:-2] + else: + fi_type = fi + + if f_bckgr is None: + f_bckgr = getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + ) + else: + f_bckgr = f_bckgr + getattr(maxwellians, fi_type)( + maxw_params=maxw_params, + ) # load all grids of the variables of f grid_tot = [] @@ -753,11 +648,11 @@ def post_process_f( # check if file exists and is in slice_name if os.path.exists(filename) and current_slice in slice_names: - grid_tot += [xp.load(filename)] + grid_tot += [np.load(filename)] # otherwise evaluate at zero else: - grid_tot += [xp.zeros(1)] + grid_tot += [np.zeros(1)] # v-grid for comp in range(1, f_bckgr.vdim + 1): @@ -769,15 +664,15 @@ def post_process_f( # check if file exists and is in slice_name if 
os.path.exists(filename) and current_slice in slice_names: - grid_tot += [xp.load(filename)] + grid_tot += [np.load(filename)] # otherwise evaluate at zero else: - grid_tot += [xp.zeros(1)] + grid_tot += [np.zeros(1)] # correct integrating out in v-direction, TODO: check for 5D Maxwellians - factor *= xp.sqrt(2 * xp.pi) + factor *= np.sqrt(2 * np.pi) - grid_eval = xp.meshgrid(*grid_tot, indexing="ij") + grid_eval = np.meshgrid(*grid_tot, indexing="ij") data_bckgr = f_bckgr(*grid_eval).squeeze() @@ -788,9 +683,9 @@ def post_process_f( data_delta_f = data_df # save distribution function - xp.save(os.path.join(path_slice, "delta_f_binned.npy"), data_delta_f) + np.save(os.path.join(path_slice, "delta_f_binned.npy"), data_delta_f) # add extra axis for data_bckgr since data_delta_f has axis for time series - xp.save( + np.save( os.path.join(path_slice, "f_binned.npy"), data_delta_f + data_bckgr[tuple([None])], ) @@ -800,13 +695,7 @@ def post_process_f( file.close() -def post_process_n_sph( - path_in, - params_in: ParamsIn, - path_out, - species, - step=1, -): +def post_process_n_sph(path_in, path_out, species, step=1, compute_bckgr=False): """Computes and saves the density n of saved sph data during a simulation. Parameters @@ -814,9 +703,6 @@ def post_process_n_sph( path_in : str Absolute path of simulation output folder. - params_in : ParamsIn - Simulation parameters. - path_out : str Absolute path of where to store the .txt files. Will be in path_out/orbits. @@ -825,11 +711,21 @@ def post_process_n_sph( step : int, optional Whether to do post-processing at every time step (step=1, default), every second time step (step=2), etc. + + compute_bckgr : bool + Whether to compute the kinetic background values and add them to the binning data. + This is used if non-standard weights are binned. 
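+
+    Examples
+    --------
+    A minimal sketch (paths and species name are illustrative)::
+
+        post_process_n_sph(
+            "/path/to/sim_1",
+            "/path/to/sim_1/post_processing",
+            "ions",
+            step=2,
+        )
+
+    For each saved evaluation view this writes ``grid_n_sph.npy`` and
+    ``n_sph.npy`` into the corresponding output folder.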
""" - # get model name and # of MPI processes from meta file - with open(os.path.join(path_in, "meta.yml"), "r") as f: - meta = yaml.load(f, Loader=yaml.FullLoader) - nproc = meta["MPI processes"] + + # get model name and # of MPI processes from meta.txt file + with open(os.path.join(path_in, "meta.txt"), "r") as f: + lines = f.readlines() + + nproc = lines[4].split()[-1] + + # load parameters + with open(os.path.join(path_in, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) # open hdf5 files files = [ @@ -866,7 +762,7 @@ def post_process_n_sph( eta2 = files[0]["kinetic/" + species + "/n_sph/" + view].attrs["eta2"] eta3 = files[0]["kinetic/" + species + "/n_sph/" + view].attrs["eta3"] - ee1, ee2, ee3 = xp.meshgrid( + ee1, ee2, ee3 = np.meshgrid( eta1, eta2, eta3, @@ -877,7 +773,7 @@ def post_process_n_sph( path_view, "grid_n_sph.npy", ) - xp.save(grid_path, (ee1, ee2, ee3)) + np.save(grid_path, (ee1, ee2, ee3)) # load n_sph data data = files[0]["kinetic/" + species + "/n_sph/" + view][::step].copy() @@ -885,4 +781,4 @@ def post_process_n_sph( data += files[rank]["kinetic/" + species + "/n_sph/" + view][::step] # save distribution functions - xp.save(os.path.join(path_view, "n_sph.npy"), data) + np.save(os.path.join(path_view, "n_sph.npy"), data) diff --git a/src/struphy/post_processing/pproc_struphy.py b/src/struphy/post_processing/pproc_struphy.py index 940c94ab3..29199d065 100644 --- a/src/struphy/post_processing/pproc_struphy.py +++ b/src/struphy/post_processing/pproc_struphy.py @@ -1,16 +1,3 @@ -import os -import pickle -import shutil - -import cunumpy as xp -import h5py -import yaml - -import struphy.post_processing.orbits.orbits_tools as orbits_pproc -import struphy.post_processing.post_processing_tools as pproc -from struphy.io.setup import import_parameters_py - - def main( path: str, *, @@ -49,6 +36,19 @@ def main( time_trace : bool whether to plot the time trace of each measured region """ + + import os + import pickle + import shutil + + import h5py + import yaml + + import struphy.post_processing.orbits.orbits_tools as orbits_pproc + import struphy.post_processing.post_processing_tools as pproc + from struphy.models import fluid, hybrid, kinetic, toy + from struphy.utils.arrays import xp as np + print("") # create post-processing folder @@ -64,7 +64,24 @@ def main( file = h5py.File(os.path.join(path, "data/", "data_proc0.hdf5"), "r") # save time grid at which post-processing data is created - xp.save(os.path.join(path_pproc, "t_grid.npy"), file["time/value"][::step].copy()) + np.save(os.path.join(path_pproc, "t_grid.npy"), file["time/value"][::step].copy()) + + # load parameters.yml + with open(os.path.join(path, "parameters.yml"), "r") as f: + params = yaml.load(f, Loader=yaml.FullLoader) + + # get model class from meta.txt file + with open(os.path.join(path, "meta.txt"), "r") as f: + lines = f.readlines() + model_name = lines[3].split()[-1] + + objs = [fluid, kinetic, hybrid, toy] + + for obj in objs: + try: + model_class = getattr(obj, model_name) + except AttributeError: + pass if "feec" in file.keys(): exist_fields = True @@ -73,40 +90,42 @@ def main( if "kinetic" in file.keys(): exist_kinetic = {"markers": False, "f": False, "n_sph": False} + + kinetic_species = [] + kinetic_kinds = [] + for name in file["kinetic"].keys(): + kinetic_species += [name] + kinetic_kinds += [model_class.species()["kinetic"][name]] + # check for saved markers if "markers" in file["kinetic"][name]: exist_kinetic["markers"] = True + # check for saved distribution 
function if "f" in file["kinetic"][name]: exist_kinetic["f"] = True + # check for saved sph density if "n_sph" in file["kinetic"][name]: exist_kinetic["n_sph"] = True + else: exist_kinetic = None file.close() - # import parameters - params_in = import_parameters_py(os.path.join(path, "parameters.py")) - # field post-processing if exist_fields: - fields, t_grid = pproc.create_femfields(path, params_in, step=step) + fields, space_ids, _ = pproc.create_femfields(path, step=step) point_data, grids_log, grids_phy = pproc.eval_femfields( - params_in, - fields, - celldivide=[celldivide, celldivide, celldivide], + path, fields, space_ids, celldivide=[celldivide, celldivide, celldivide] ) if physical: point_data_phy, grids_log, grids_phy = pproc.eval_femfields( - params_in, - fields, - celldivide=[celldivide, celldivide, celldivide], - physical=True, + path, fields, space_ids, celldivide=[celldivide, celldivide, celldivide], physical=True ) # directory for field data @@ -119,19 +138,38 @@ def main( os.mkdir(path_fields) # save data dicts for each field - for species, vars in point_data.items(): - for name, val in vars.items(): + for name, val in point_data.items(): + aux = name.split("_") + # is em field + if len(aux) == 1 or "field" in name: + subfolder = "em_fields" + new_name = name + try: + os.mkdir(os.path.join(path_fields, subfolder)) + except: + pass + + # is fluid species + else: + subfolder = aux[0] + for au in aux[1:-1]: + subfolder += "_" + au + new_name = aux[-1] try: - os.mkdir(os.path.join(path_fields, species)) + os.mkdir(os.path.join(path_fields, subfolder)) except: pass - with open(os.path.join(path_fields, species, name + "_log.bin"), "wb") as handle: - pickle.dump(val, handle, protocol=pickle.HIGHEST_PROTOCOL) + print(f"{name = }") + print(f"{subfolder = }") + print(f"{new_name = }") - if physical: - with open(os.path.join(path_fields, species, name + "_phy.bin"), "wb") as handle: - pickle.dump(point_data_phy[species][name], handle, protocol=pickle.HIGHEST_PROTOCOL) + with open(os.path.join(path_fields, subfolder, new_name + "_log.bin"), "wb") as handle: + pickle.dump(val, handle, protocol=pickle.HIGHEST_PROTOCOL) + + if physical: + with open(os.path.join(path_fields, subfolder, new_name + "_phy.bin"), "wb") as handle: + pickle.dump(point_data_phy[name], handle, protocol=pickle.HIGHEST_PROTOCOL) # save grids with open(os.path.join(path_fields, "grids_log.bin"), "wb") as handle: @@ -142,9 +180,9 @@ def main( # create vtk files if not no_vtk: - pproc.create_vtk(path_fields, t_grid, grids_phy, point_data) + pproc.create_vtk(path_fields, grids_phy, point_data) if physical: - pproc.create_vtk(path_fields, t_grid, grids_phy, point_data_phy, physical=True) + pproc.create_vtk(path_fields, grids_phy, point_data_phy, physical=True) # kinetic post-processing if exist_kinetic is not None: @@ -201,19 +239,14 @@ def main( libpath = struphy.__path__[0] parser = argparse.ArgumentParser( - description="Post-process data of finished Struphy runs to prepare for diagnostics.", + description="Post-process data of finished Struphy runs to prepare for diagnostics." 
) # paths of simulation folders parser.add_argument("dir", type=str, metavar="DIR", help="absolute path of simulation ouput folder to post-process") parser.add_argument( - "-s", - "--step", - type=int, - metavar="N", - help="do post-processing every N-th time step (default=1)", - default=1, + "-s", "--step", type=int, metavar="N", help="do post-processing every N-th time step (default=1)", default=1 ) parser.add_argument( @@ -231,15 +264,11 @@ def main( ) parser.add_argument( - "--guiding-center", - help="compute guiding-center coordinates (only from Particles6D)", - action="store_true", + "--guiding-center", help="compute guiding-center coordinates (only from Particles6D)", action="store_true" ) parser.add_argument( - "--classify", - help="classify guiding-center trajectories (passing, trapped or lost)", - action="store_true", + "--classify", help="classify guiding-center trajectories (passing, trapped or lost)", action="store_true" ) parser.add_argument("--no-vtk", help="whether vtk files creation should be skipped", action="store_true") diff --git a/src/struphy/post_processing/profile_struphy.py b/src/struphy/post_processing/profile_struphy.py index 43d4be47d..8c738a9a7 100644 --- a/src/struphy/post_processing/profile_struphy.py +++ b/src/struphy/post_processing/profile_struphy.py @@ -1,11 +1,11 @@ import pickle import sys -import cunumpy as xp import yaml from matplotlib import pyplot as plt from struphy.post_processing.cprofile_analyser import get_cprofile_data, replace_keys +from struphy.utils.arrays import xp as np def main(): @@ -93,7 +93,7 @@ def main(): + "ncalls".ljust(15) + "totime".ljust(15) + "percall".ljust(15) - + "cumtime".ljust(15), + + "cumtime".ljust(15) ) print("-" * 154) for position, key in enumerate(dicts[0].keys()): @@ -150,17 +150,17 @@ def main(): plt.ylabel("time [s]") plt.title("Strong scaling for Nel=" + str(val["Nel"][0]) + " cells") plt.legend(loc="lower left") - plt.loglog(val["mpi_size"], val["time"][0] / 2 ** xp.arange(len(val["time"])), "k--", alpha=0.3) + plt.loglog(val["mpi_size"], val["time"][0] / 2 ** np.arange(len(val["time"])), "k--", alpha=0.3) # weak scaling plot else: plt.plot(val["mpi_size"], val["time"], label=key) plt.xlabel("mpi_size") plt.ylabel("time [s]") plt.title( - "Weak scaling for cells/mpi_size=" + str(xp.prod(val["Nel"][0]) / val["mpi_size"][0]) + "=const.", + "Weak scaling for cells/mpi_size=" + str(np.prod(val["Nel"][0]) / val["mpi_size"][0]) + "=const." 
) plt.legend(loc="upper left") - # plt.loglog(val['mpi_size'], val['time'][0]*xp.ones_like(val['time']), 'k--', alpha=0.3) + # plt.loglog(val['mpi_size'], val['time'][0]*np.ones_like(val['time']), 'k--', alpha=0.3) plt.xscale("log") plt.show() diff --git a/src/struphy/profiling/profiling.py b/src/struphy/profiling/profiling.py index e96749614..568478e40 100644 --- a/src/struphy/profiling/profiling.py +++ b/src/struphy/profiling/profiling.py @@ -17,9 +17,10 @@ # Import the profiling configuration class and context manager from functools import lru_cache -import cunumpy as xp from psydac.ddm.mpi import mpi as MPI +from struphy.utils.arrays import xp as np + @lru_cache(maxsize=None) # Cache the import result to avoid repeated imports def _import_pylikwid(): @@ -170,9 +171,9 @@ def save_to_pickle(cls, file_path): for name, region in cls._regions.items(): local_data[name] = { "ncalls": region.ncalls, - "durations": xp.array(region.durations, dtype=xp.float64), - "start_times": xp.array(region.start_times, dtype=xp.float64), - "end_times": xp.array(region.end_times, dtype=xp.float64), + "durations": np.array(region.durations, dtype=np.float64), + "start_times": np.array(region.start_times, dtype=np.float64), + "end_times": np.array(region.end_times, dtype=np.float64), "config": { "likwid": region.config.likwid, "simulation_label": region.config.simulation_label, @@ -246,7 +247,7 @@ def print_summary(cls): average_duration = total_duration / region.ncalls min_duration = min(region.durations) max_duration = max(region.durations) - std_duration = xp.std(region.durations) + std_duration = np.std(region.durations) else: total_duration = average_duration = min_duration = max_duration = std_duration = 0 @@ -270,16 +271,16 @@ def __init__(self, region_name, time_trace=False): self._region_name = self.config.simulation_label + region_name self._time_trace = time_trace self._ncalls = 0 - self._start_times = xp.empty(1, dtype=float) - self._end_times = xp.empty(1, dtype=float) - self._durations = xp.empty(1, dtype=float) + self._start_times = np.empty(1, dtype=float) + self._end_times = np.empty(1, dtype=float) + self._durations = np.empty(1, dtype=float) self._started = False def __enter__(self): if self._ncalls == len(self._start_times): - self._start_times = xp.append(self._start_times, xp.zeros_like(self._start_times)) - self._end_times = xp.append(self._end_times, xp.zeros_like(self._end_times)) - self._durations = xp.append(self._durations, xp.zeros_like(self._durations)) + self._start_times = np.append(self._start_times, np.zeros_like(self._start_times)) + self._end_times = np.append(self._end_times, np.zeros_like(self._end_times)) + self._durations = np.append(self._durations, np.zeros_like(self._durations)) if self.config.likwid: self._pylikwid().markerstartregion(self.region_name) diff --git a/src/struphy/propagators/__init__.py b/src/struphy/propagators/__init__.py index 72067e021..9d6a018ee 100644 --- a/src/struphy/propagators/__init__.py +++ b/src/struphy/propagators/__init__.py @@ -1,98 +1,95 @@ -# from struphy.propagators.propagators_coupling import ( -# CurrentCoupling5DCurlb, -# CurrentCoupling5DGradB, -# CurrentCoupling6DCurrent, -# EfieldWeights, -# PressureCoupling6D, -# VlasovAmpere, -# ) -# from struphy.propagators.propagators_fields import ( -# AdiabaticPhi, -# CurrentCoupling5DDensity, -# CurrentCoupling6DDensity, -# FaradayExtended, -# Hall, -# HasegawaWakatani, -# ImplicitDiffusion, -# JxBCold, -# Magnetosonic, -# MagnetosonicCurrentCoupling5D, -# MagnetosonicUniform, -# Maxwell, 
-# OhmCold, -# Poisson, -# ShearAlfven, -# ShearAlfvenB1, -# ShearAlfvenCurrentCoupling5D, -# TimeDependentSource, -# TwoFluidQuasiNeutralFull, -# VariationalDensityEvolve, -# VariationalEntropyEvolve, -# VariationalMagFieldEvolve, -# VariationalMomentumAdvection, -# VariationalPBEvolve, -# VariationalQBEvolve, -# VariationalResistivity, -# VariationalViscosity, -# ) -# from struphy.propagators.propagators_markers import ( -# PushDeterministicDiffusion, -# PushEta, -# PushEtaPC, -# PushGuidingCenterBxEstar, -# PushGuidingCenterParallel, -# PushRandomDiffusion, -# PushVinEfield, -# PushVinSPHpressure, -# PushVinViscousPotential2D, -# PushVinViscousPotential3D, -# PushVxB, -# StepStaticEfield, -# ) +from struphy.propagators.propagators_coupling import ( + CurrentCoupling5DCurlb, + CurrentCoupling5DGradB, + CurrentCoupling6DCurrent, + EfieldWeights, + PressureCoupling6D, + VlasovAmpere, +) +from struphy.propagators.propagators_fields import ( + AdiabaticPhi, + CurrentCoupling5DDensity, + CurrentCoupling6DDensity, + FaradayExtended, + Hall, + HasegawaWakatani, + ImplicitDiffusion, + JxBCold, + Magnetosonic, + MagnetosonicCurrentCoupling5D, + MagnetosonicUniform, + Maxwell, + OhmCold, + Poisson, + ShearAlfven, + ShearAlfvenB1, + ShearAlfvenCurrentCoupling5D, + TimeDependentSource, + TwoFluidQuasiNeutralFull, + VariationalDensityEvolve, + VariationalEntropyEvolve, + VariationalMagFieldEvolve, + VariationalMomentumAdvection, + VariationalPBEvolve, + VariationalQBEvolve, + VariationalResistivity, + VariationalViscosity, +) +from struphy.propagators.propagators_markers import ( + PushDeterministicDiffusion, + PushEta, + PushEtaPC, + PushGuidingCenterBxEstar, + PushGuidingCenterParallel, + PushRandomDiffusion, + PushVinEfield, + PushVinSPHpressure, + PushVinViscousPotential, + PushVxB, +) -# __all__ = [ -# "VlasovAmpere", -# "EfieldWeights", -# "PressureCoupling6D", -# "CurrentCoupling6DCurrent", -# "CurrentCoupling5DCurlb", -# "CurrentCoupling5DGradB", -# "Maxwell", -# "OhmCold", -# "JxBCold", -# "ShearAlfven", -# "ShearAlfvenB1", -# "Hall", -# "Magnetosonic", -# "MagnetosonicUniform", -# "FaradayExtended", -# "CurrentCoupling6DDensity", -# "ShearAlfvenCurrentCoupling5D", -# "CurrentCoupling5DDensity", -# "ImplicitDiffusion", -# "Poisson", -# "VariationalMomentumAdvection", -# "VariationalDensityEvolve", -# "VariationalEntropyEvolve", -# "VariationalMagFieldEvolve", -# "VariationalPBEvolve", -# "VariationalQBEvolve", -# "VariationalViscosity", -# "VariationalResistivity", -# "TimeDependentSource", -# "AdiabaticPhi", -# "HasegawaWakatani", -# "TwoFluidQuasiNeutralFull", -# "PushEta", -# "PushVxB", -# "PushVinEfield", -# "PushEtaPC", -# "PushGuidingCenterBxEstar", -# "PushGuidingCenterParallel", -# "StepStaticEfield", -# "PushDeterministicDiffusion", -# "PushRandomDiffusion", -# "PushVinSPHpressure", -# "PushVinViscousPotential2D", -# "PushVinViscousPotential3D", -# ] +__all__ = [ + "VlasovAmpere", + "EfieldWeights", + "PressureCoupling6D", + "CurrentCoupling6DCurrent", + "CurrentCoupling5DCurlb", + "CurrentCoupling5DGradB", + "Maxwell", + "OhmCold", + "JxBCold", + "ShearAlfven", + "ShearAlfvenB1", + "Hall", + "Magnetosonic", + "MagnetosonicUniform", + "FaradayExtended", + "CurrentCoupling6DDensity", + "ShearAlfvenCurrentCoupling5D", + "MagnetosonicCurrentCoupling5D", + "CurrentCoupling5DDensity", + "ImplicitDiffusion", + "Poisson", + "VariationalMomentumAdvection", + "VariationalDensityEvolve", + "VariationalEntropyEvolve", + "VariationalMagFieldEvolve", + "VariationalPBEvolve", + 
"VariationalQBEvolve", + "VariationalViscosity", + "VariationalResistivity", + "TimeDependentSource", + "AdiabaticPhi", + "HasegawaWakatani", + "TwoFluidQuasiNeutralFull", + "PushEta", + "PushVxB", + "PushVinEfield", + "PushEtaPC", + "PushGuidingCenterBxEstar", + "PushGuidingCenterParallel", + "PushDeterministicDiffusion", + "PushRandomDiffusion", + "PushVinSPHpressure", + "PushVinViscousPotential", +] diff --git a/src/struphy/propagators/base.py b/src/struphy/propagators/base.py index 945107bbd..8e05b1b17 100644 --- a/src/struphy/propagators/base.py +++ b/src/struphy/propagators/base.py @@ -1,24 +1,16 @@ "Propagator base class." from abc import ABCMeta, abstractmethod -from dataclasses import dataclass -from typing import Literal - -import cunumpy as xp -from psydac.linalg.block import BlockVector -from psydac.linalg.stencil import StencilVector from struphy.feec.basis_projection_ops import BasisProjectionOperators from struphy.feec.mass import WeightedMassOperators from struphy.feec.psydac_derham import Derham -from struphy.fields_background.projected_equils import ProjectedFluidEquilibriumWithB from struphy.geometry.base import Domain -from struphy.io.options import check_option -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable +from struphy.utils.arrays import xp as np class Propagator(metaclass=ABCMeta): - """Base class for propagators used in StruphyModels. + """Base class for Struphy propagators used in Struphy models. Note ---- @@ -27,99 +19,55 @@ class Propagator(metaclass=ABCMeta): Only propagators that update both a FEEC and a PIC species go into ``propagators_coupling.py``. """ - @abstractmethod - class Variables: - """Define variable names and types to be updated by the propagator.""" - - def __init__(self): - self._var1 = None - - @property - def var1(self): - return self._var1 - - @var1.setter - def var1(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles6D" - self._var1 = new - - @abstractmethod - def __init__(self): - self.variables = self.Variables() - - @abstractmethod - @dataclass - class Options: - # specific literals - OptsTemplate = Literal["implicit", "explicit"] - # propagator options - opt1: str = ("implicit",) - - def __post_init__(self): - # checks - check_option(self.opt1, self.OptsTemplate) - - @property - @abstractmethod - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - @abstractmethod - def options(self, new): - assert isinstance(new, self.Options) - if True: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @abstractmethod - def allocate(self): - """Allocate all data/objects of the instance.""" - - @abstractmethod - def __call__(self, dt: float): - """Update variables from t -> t + dt. - Use ``Propagators.feec_vars_update`` to write to FEEC variables to ``Propagator.feec_vars``. + def __init__(self, *vars): + """Create an instance of a Propagator. Parameters ---------- - dt : float - Time step size. + vars : Vector or Particles + :attr:`struphy.models.base.StruphyModel.pointer` of variables to be updated. 
""" + from psydac.linalg.basic import Vector + + from struphy.pic.particles import Particles + + self._feec_vars = [] + self._particles = [] + + for var in vars: + if isinstance(var, Vector): + self._feec_vars += [var] + elif isinstance(var, Particles): + self._particles += [var] + else: + ValueError( + f'Variable {var} must be of type "Vector" or "Particles".', + ) + + # for iterative particle push + self._init_kernels = [] + self._eval_kernels = [] + + # mpi comm + if self.particles: + comm = self.particles[0].mpi_comm + else: + comm = self.derham.comm + self._rank = comm.Get_rank() if comm is not None else 0 - def update_feec_variables(self, **new_coeffs): - r"""Return max_diff = max(abs(new - old)) for each new_coeffs, - update feec coefficients and update ghost regions. - - Returns - ------- - diffs : dict - max_diff for all feec variables. + @property + def feec_vars(self): + """List of FEEC variables (not particles) to be updated by the propagator. + Contains FE coefficients from :attr:`struphy.feec.SplineFunction.vector`. """ - diffs = {} - for var, new in new_coeffs.items(): - assert "_" + var in self.variables.__dict__, f"{var} not in {self.variables.__dict__}." - assert isinstance(new, (StencilVector, BlockVector)) - old_var = getattr(self.variables, var) - assert isinstance(old_var, FEECVariable) - old = old_var.spline.vector - assert new.space == old.space - - # calculate maximum of difference abs(new - old) - diffs[var] = xp.max(xp.abs(new.toarray() - old.toarray())) + return self._feec_vars - # copy new coeffs into old - new.copy(out=old) - - # important: sync processes! - old.update_ghost_regions() - - return diffs + @property + def particles(self): + """List of kinetic variables (not FEEC) to be updated by the propagator. + Contains :class:`struphy.pic.particles.Particles`. + """ + return self._particles @property def init_kernels(self): @@ -143,6 +91,24 @@ def rank(self): """MPI rank, is 0 if no communicator.""" return self._rank + @abstractmethod + def __call__(self, dt): + """Update from t -> t + dt. + Use ``Propagators.feec_vars_update`` to write to FEEC variables to ``Propagator.feec_vars``. + + Parameters + ---------- + dt : float + Time step size. + """ + pass + + @staticmethod + @abstractmethod + def options(): + """Dictionary of available propagator options, as appearing under species/options in the parameter file.""" + pass + @property def derham(self): """Derham spaces and projectors.""" @@ -191,7 +157,7 @@ def basis_ops(self, basis_ops): self._basis_ops = basis_ops @property - def projected_equil(self) -> ProjectedFluidEquilibriumWithB: + def projected_equil(self): """Fluid equilibrium projected on 3d Derham sequence with commuting projectors.""" assert hasattr( self, @@ -200,9 +166,8 @@ def projected_equil(self) -> ProjectedFluidEquilibriumWithB: return self._projected_equil @projected_equil.setter - def projected_equil(self, new): - assert isinstance(new, ProjectedFluidEquilibriumWithB) - self._projected_equil = new + def projected_equil(self, projected_equil): + self._projected_equil = projected_equil @property def time_state(self): @@ -220,6 +185,40 @@ def add_time_state(self, time_state): assert time_state.size == 1 self._time_state = time_state + def feec_vars_update(self, *variables_new): + r"""Return :math:`\textrm{max}_i |x_i(t + \Delta t) - x_i(t)|` for each unknown in list, + update :method:`~struphy.propagators.base.Propagator.feec_vars` + and update ghost regions. 
+ + Parameters + ---------- + variables_new : list[StencilVector | BlockVector] + Same sequence as in :attr:`~struphy.propagators.base.Propagator.feec_vars` + but with the updated variables, + i.e. for feec_vars = [e, b] we must have variables_new = [e_updated, b_updated]. + + Returns + ------- + diffs : list + A list [max(abs(self.feec_vars - variables_new)), ...] for all variables in self.feec_vars and variables_new. + """ + + diffs = [] + + for i, new in enumerate(variables_new): + assert type(new) is type(self.feec_vars[i]) + + # calculate maximum of difference abs(old - new) + diffs += [np.max(np.abs(self.feec_vars[i].toarray() - new.toarray()))] + + # copy new variables into self.feec_vars + new.copy(out=self.feec_vars[i]) + + # important: sync processes! + self.feec_vars[i].update_ghost_regions() + + return diffs def add_init_kernel( self, kernel, @@ -246,12 +245,9 @@ def add_init_kernel( The arguments for the kernel function. """ if comps is None: - comps = xp.array([0]) # case for scalar evaluation + comps = np.array([0]) # case for scalar evaluation else: - comps = xp.array(comps, dtype=int) - - if not hasattr(self, "_init_kernels"): - self._init_kernels = [] + comps = np.array(comps, dtype=int) self._init_kernels += [ ( kernel, column_nr, comps, args_init, - ), + ) ] def add_eval_kernel( @@ -297,15 +293,12 @@ def add_eval_kernel( """ if isinstance(alpha, int) or isinstance(alpha, float): alpha = [alpha] * 6 - alpha = xp.array(alpha) + alpha = np.array(alpha) if comps is None: - comps = xp.array([0]) # case for scalar evaluation + comps = np.array([0]) # case for scalar evaluation else: - comps = xp.array(comps, dtype=int) - - if not hasattr(self, "_eval_kernels"): - self._eval_kernels = [] + comps = np.array(comps, dtype=int) self._eval_kernels += [ ( @@ -314,5 +307,5 @@ def add_eval_kernel( column_nr, comps, args_eval, - ), + ) ] diff --git a/src/struphy/propagators/propagators_coupling.py b/src/struphy/propagators/propagators_coupling.py index 0b8760b6a..88472a177 100644 --- a/src/struphy/propagators/propagators_coupling.py +++ b/src/struphy/propagators/propagators_coupling.py @@ -1,34 +1,22 @@ "Particle and FEEC variables are updated." 
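+# Toy sketch (assumed dense form, illustrative only) of the Schur-complement
+# update used by several propagators in this module: eliminating the particle
+# block from the implicit midpoint system leaves a single solve for the field,
+#
+#     (A - dt^2/4 * BC) e^{n+1} = (A + dt^2/4 * BC) e^n + dt * B V^n,
+#
+# e.g. with plain NumPy:
+#
+#     import numpy as np
+#
+#     def schur_step(A, BC, BVn, e_n, dt):
+#         lhs = A - 0.25 * dt**2 * BC
+#         rhs = (A + 0.25 * dt**2 * BC) @ e_n + dt * BVn
+#         return np.linalg.solve(lhs, rhs)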
-from dataclasses import dataclass -from typing import Literal - -import cunumpy as xp -from line_profiler import profile -from psydac.ddm.mpi import mpi as MPI from psydac.linalg.block import BlockVector -from psydac.linalg.solvers import inverse from psydac.linalg.stencil import StencilVector from struphy.feec import preconditioner from struphy.feec.linear_operators import LinOpWithTransp -from struphy.io.options import OptsGenSolver, OptsMassPrecond, OptsSymmSolver, OptsVecSpace, check_option from struphy.io.setup import descend_options_dict from struphy.kinetic_background.base import Maxwellian from struphy.kinetic_background.maxwellians import Maxwellian3D from struphy.linear_algebra.schur_solver import SchurSolver -from struphy.linear_algebra.solver import DiscreteGradientSolverParameters, SolverParameters -from struphy.models.variables import FEECVariable, PICVariable -from struphy.ode.utils import ButcherTableau -from struphy.pic import utilities_kernels from struphy.pic.accumulation import accum_kernels, accum_kernels_gc -from struphy.pic.accumulation.filter import FilterParameters -from struphy.pic.accumulation.particles_to_grid import Accumulator, AccumulatorVector +from struphy.pic.accumulation.particles_to_grid import Accumulator from struphy.pic.particles import Particles5D, Particles6D from struphy.pic.pushing import pusher_kernels, pusher_kernels_gc from struphy.pic.pushing.pusher import Pusher from struphy.polar.basic import PolarVector from struphy.propagators.base import Propagator +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -39,9 +27,9 @@ class VlasovAmpere(Propagator): .. math:: -& \int_\Omega \frac{\partial \mathbf E}{\partial t} \cdot \mathbf F\,\textrm d \mathbf x = - \frac{\alpha^2}{\varepsilon} \int_\Omega \int_{\mathbb{R}^3} f \mathbf{v} \cdot \mathbf F \, \text{d}^3 \mathbf{v} \,\textrm d \mathbf x \qquad \forall \, \mathbf F \in H(\textnormal{curl}) \,, + c_1 \int_\Omega \int_{\mathbb{R}^3} f \mathbf{v} \cdot \mathbf F \, \text{d}^3 \mathbf{v} \,\textrm d \mathbf x \qquad \forall \, \mathbf F \in H(\textnormal{curl}) \,, \\[2mm] - &\frac{\partial f}{\partial t} + \frac{1}{\varepsilon}\, \mathbf{E} + &\frac{\partial f}{\partial t} + c_2\, \mathbf{E} \cdot \frac{\partial f}{\partial \mathbf{v}} = 0 \,. :ref:`time_discret`: Crank-Nicolson (implicit mid-point). System size reduction via :class:`~struphy.linear_algebra.schur_solver.SchurSolver`, such that @@ -55,8 +43,8 @@ class VlasovAmpere(Propagator): = \frac{\Delta t}{2} \begin{bmatrix} - 0 & - \frac{\alpha^2}{\varepsilon} \mathbb L^1 \bar{DF^{-1}} \bar{\mathbf w} \\ - \frac{1}{\varepsilon} \bar{DF^{-\top}} \left(\mathbb L^1\right)^\top & 0 + 0 & - c_1 \mathbb L^1 \bar{DF^{-1}} \bar{\mathbf w} \\ + c_2 \bar{DF^{-\top}} \left(\mathbb L^1\right)^\top & 0 \end{bmatrix} \begin{bmatrix} \mathbf{e}^{n+1} + \mathbf{e}^n \\ @@ -67,90 +55,59 @@ class VlasovAmpere(Propagator): .. math:: - A = \mathbb M^1\,,\qquad B = \frac{\alpha^2}{2\varepsilon} \mathbb L^1 \bar{DF^{-1}} \bar{\mathbf w}\,,\qquad C = - \frac{1}{2\varepsilon} \bar{DF^{-\top}} \left(\mathbb L^1\right)^\top \,. + A = \mathbb M^1\,,\qquad B = \frac{c_1}{2} \mathbb L^1 \bar{DF^{-1}} \bar{\mathbf w}\,,\qquad C = - \frac{c_2}{2} \bar{DF^{-\top}} \left(\mathbb L^1\right)^\top \,. The accumulation matrix and vector assembled in :class:`~struphy.pic.accumulation.particles_to_grid.Accumulator` are .. math:: M = BC \,,\qquad V = B \mathbf V \,. 
- """ - - class Variables: - def __init__(self): - self._e: FEECVariable = None - self._ions: PICVariable = None - - @property - def e(self) -> FEECVariable: - return self._e - @e.setter - def e(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._e = new + Note + ---------- + * For :class:`~struphy.models.kinetic.VlasovAmpereOneSpecies`: :math:`c_1 = \kappa^2 \,, \, c_2 = 1` + * For :class:`~struphy.models.kinetic.VlasovMaxwellOneSpecies`: :math:`c_1 = \alpha^2/\varepsilon \,, \, c_2 = 1/\varepsilon` + * For :class:`~struphy.models.hybrid.ColdPlasmaVlasov`: :math:`c_1 = \nu\alpha^2/\varepsilon_\textrm{c} \,, \, c_2 = 1/\varepsilon_\textrm{h}` + """ - @property - def ions(self) -> PICVariable: - return self._ions - - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles6D" - self._ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # scaling factors - alpha = self.variables.ions.species.equation_params.alpha - epsilon = self.variables.ions.species.equation_params.epsilon - - self._c1 = alpha**2 / epsilon - self._c2 = 1.0 / epsilon - - self._info = self.options.solver_params.info + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + e: BlockVector, + particles: Particles6D, + *, + c1: float = 1.0, + c2: float = 1.0, + solver=options(default=True)["solver"], + ): + super().__init__(e, particles) + + self._c1 = c1 + self._c2 = c2 + self._info = solver["info"] # get accumulation kernel accum_kernel = Pyccelkernel(accum_kernels.vlasov_maxwell) # Initialize Accumulator object - particles = self.variables.ions.particles - self._accum = Accumulator( particles, "Hcurl", @@ -162,19 +119,19 @@ def allocate(self): ) # Create buffers to store temporarily e and its sum with old e - self._e_tmp = self.derham.Vh["1"].zeros() - self._e_scale = self.derham.Vh["1"].zeros() - self._e_sum = self.derham.Vh["1"].zeros() + self._e_tmp = e.space.zeros() + self._e_scale = e.space.zeros() + self._e_sum = e.space.zeros() # ================================ # ========= Schur Solver ========= # ================================ # Preconditioner - if self.options.precond is None: + if solver["type"][1] == None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(self.mass_ops.M1) # Define block matrix [[A B], [C I]] (without time 
step size dt in the diagonals) @@ -185,9 +142,11 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # Instantiate particle pusher @@ -207,7 +166,6 @@ def allocate(self): alpha_in_kernel=1.0, ) - @profile def __call__(self, dt): # accumulate self._accum() @@ -216,14 +174,14 @@ def __call__(self, dt): self._schur_solver.BC = self._accum.operators[0] self._schur_solver.BC *= -self._c1 * self._c2 / 4.0 - # Vector for Schur solver + # Vector for schur solver self._e_scale *= 0.0 self._e_scale += self._accum.vectors[0] self._e_scale *= self._c1 / 2.0 # new e coeffs self._e_tmp, info = self._schur_solver( - self.variables.e.spline.vector, + self.feec_vars[0], self._e_scale, dt, out=self._e_tmp, @@ -231,7 +189,7 @@ def __call__(self, dt): # mid-point e-field (no tmps created here) self._e_sum *= 0.0 - self._e_sum += self.variables.e.spline.vector + self._e_sum += self.feec_vars[0] self._e_sum += self._e_tmp self._e_sum *= 0.5 @@ -239,30 +197,29 @@ def __call__(self, dt): self._pusher(dt) # update_weights - if self.variables.ions.species.weights_params.control_variate: - self.variables.ions.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() # write new coeffs into self.variables - (max_de,) = self.update_feec_variables(e=self._e_tmp) + (max_de,) = self.feec_vars_update(self._e_tmp) # Print out max differences for weights and e-field if self._info: print("Status for VlasovMaxwell:", info["success"]) print("Iterations for VlasovMaxwell:", info["niter"]) print("Maxdiff e1 for VlasovMaxwell:", max_de) - particles = self.variables.ions.particles - buffer_idx = particles.bufferindex - max_diff = xp.max( - xp.abs( - xp.sqrt( - particles.markers_wo_holes[:, 3] ** 2 - + particles.markers_wo_holes[:, 4] ** 2 - + particles.markers_wo_holes[:, 5] ** 2, + buffer_idx = self.particles[0].bufferindex + max_diff = np.max( + np.abs( + np.sqrt( + self.particles[0].markers_wo_holes[:, 3] ** 2 + + self.particles[0].markers_wo_holes[:, 4] ** 2 + + self.particles[0].markers_wo_holes[:, 5] ** 2, ) - - xp.sqrt( - particles.markers_wo_holes[:, buffer_idx + 3] ** 2 - + particles.markers_wo_holes[:, buffer_idx + 4] ** 2 - + particles.markers_wo_holes[:, buffer_idx + 5] ** 2, + - np.sqrt( + self.particles[0].markers_wo_holes[:, buffer_idx + 3] ** 2 + + self.particles[0].markers_wo_holes[:, buffer_idx + 4] ** 2 + + self.particles[0].markers_wo_holes[:, buffer_idx + 5] ** 2, ), ), ) @@ -326,85 +283,50 @@ class EfieldWeights(Propagator): """ - class Variables: - def __init__(self): - self._e: FEECVariable = None - self._ions: PICVariable = None - - @property - def e(self) -> FEECVariable: - return self._e - - @e.setter - def e(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._e = new - - @property - def ions(self) -> PICVariable: - return self._ions - - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable) - assert new.space in ("Particles6D", "DeltaFParticles6D") - self._ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - alpha: float = 1.0 - kappa: float = 1.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.solver, 
OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - if self.solver_params is None: - self.solver_params = SolverParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._alpha = self.options.alpha - self._kappa = self.options.kappa - - backgrounds = self.variables.ions.backgrounds - # use single Maxwellian - if isinstance(backgrounds, list): - self._f0 = backgrounds[0] - else: - self._f0 = backgrounds - assert isinstance(self._f0, Maxwellian3D), "The background distribution function must be a uniform Maxwellian!" - self._vth = self._f0.maxw_params["vth1"][0] - - self._info = self.options.solver_params.info + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + e: BlockVector, + particles: Particles6D, + *, + alpha: float = 1.0, + kappa: float = 1.0, + f0: Maxwellian = None, + solver=options(default=True)["solver"], + ): + super().__init__(e, particles) + + if f0 is None: + f0 = Maxwellian3D() + assert isinstance(f0, Maxwellian3D) + + self._alpha = alpha + self._kappa = kappa + self._f0 = f0 + assert self._f0.maxw_params["vth1"] == self._f0.maxw_params["vth2"] == self._f0.maxw_params["vth3"] + self._vth = self._f0.maxw_params["vth1"] + + self._info = solver["info"] # Initialize Accumulator object - e = self.variables.e.spline.vector - particles = self.variables.ions.particles - self._accum = Accumulator( particles, "Hcurl", @@ -421,18 +343,18 @@ def allocate(self): self._e_sum = e.space.zeros() # marker storage - self._f0_values = xp.zeros(particles.markers.shape[0], dtype=float) - self._old_weights = xp.empty(particles.markers.shape[0], dtype=float) + self._f0_values = np.zeros(particles.markers.shape[0], dtype=float) + self._old_weights = np.empty(particles.markers.shape[0], dtype=float) # ================================ # ========= Schur Solver ========= # ================================ # Preconditioner - if self.options.precond == None: + if solver["type"][1] == None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(self.mass_ops.M1) # Define block matrix [[A B], [C I]] (without time step size dt in the diagonals) @@ -443,9 +365,11 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # Instantiate particle pusher @@ -468,17 +392,14 @@ def allocate(self): ) def __call__(self, dt): - en = self.variables.e.spline.vector - particles = self.variables.ions.particles - # evaluate f0 and accumulate self._f0_values[:] = self._f0( - particles.markers[:, 0], - particles.markers[:, 1], - particles.markers[:, 2], - particles.markers[:, 3], - particles.markers[:, 4], - particles.markers[:, 5], + 
self.particles[0].markers[:, 0], + self.particles[0].markers[:, 1], + self.particles[0].markers[:, 2], + self.particles[0].markers[:, 3], + self.particles[0].markers[:, 4], + self.particles[0].markers[:, 5], ) self._accum(self._f0_values) @@ -494,34 +415,35 @@ def __call__(self, dt): # new e-field (no tmps created here) self._e_tmp, info = self._schur_solver( - xn=en, + xn=self.feec_vars[0], Byn=self._e_scale, dt=dt, out=self._e_tmp, ) # Store old weights - self._old_weights[~particles.holes] = particles.markers_wo_holes[:, 6] + self._old_weights[~self.particles[0].holes] = self.particles[0].markers_wo_holes[:, 6] # Compute (e^{n+1} + e^n) (no tmps created here) self._e_sum *= 0.0 - self._e_sum += en + self._e_sum += self.feec_vars[0] self._e_sum += self._e_tmp # Update weights self._pusher(dt) # write new coeffs into self.variables - max_de = self.update_feec_variables(e=self._e_tmp) + (max_de,) = self.feec_vars_update(self._e_tmp) # Print out max differences for weights and e-field if self._info: print("Status for StepEfieldWeights:", info["success"]) print("Iterations for StepEfieldWeights:", info["niter"]) print("Maxdiff e1 for StepEfieldWeights:", max_de) - max_diff = xp.max( - xp.abs( - self._old_weights[~particles.holes] - particles.markers[~particles.holes, 6], + max_diff = np.max( + np.abs( + self._old_weights[~self.particles[0].holes] + - self.particles[0].markers[~self.particles[0].holes, 6], ), ) print("Maxdiff weights for StepEfieldWeights:", max_diff) @@ -551,133 +473,119 @@ class PressureCoupling6D(Propagator): \begin{bmatrix} {\mathbb M^n}(u^{n+1} + u^n) \\ \bar W (V^{n+1} + V^{n} \end{bmatrix} \,. """ - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._energetic_ions: PICVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def energetic_ions(self) -> PICVariable: - return self._energetic_ions - - @energetic_ions.setter - def energetic_ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles6D" - self._energetic_ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - ep_scale: float = 1.0 - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - use_perp_model: bool = True - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert isinstance(self.ep_scale, float) - assert isinstance(self.use_perp_model, bool) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.filter_params is None: - self.filter_params = FilterParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.u_space == "H1vec": - self._u_form_int = 0 + @staticmethod + def 
options(default=False): + dct = {} + dct["use_perp_model"] = [True, False] + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles5D, + u: BlockVector | PolarVector, + *, + use_perp_model: bool = options(default=True)["use_perp_model"], + u_space: str, + solver: dict = options(default=True)["solver"], + coupling_params: dict, + filter: dict = options(default=True)["filter"], + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(particles, u) + + self._G = self.derham.grad + self._GT = self.derham.grad.transpose() + + self._info = solver["info"] + if self.derham.comm is None: + self._rank = 0 else: - self._u_form_int = int(self.derham.space_to_form[self.options.u_space]) + self._rank = self.derham.comm.Get_rank() + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} - if self.options.u_space == "Hcurl": + if u_space == "Hcurl": id_Mn = "M1n" id_X = "X1" - elif self.options.u_space == "Hdiv": + elif u_space == "Hdiv": id_Mn = "M2n" id_X = "X2" - elif self.options.u_space == "H1vec": + elif u_space == "H1vec": id_Mn = "Mvn" id_X = "Xv" - # call operatros - id_M = "M" + self.derham.space_to_form[self.options.u_space] + "n" - _A = getattr(self.mass_ops, id_M) - self._X = getattr(self.basis_ops, id_X) - self._XT = self._X.transpose() - grad = self.derham.grad - gradT = grad.transpose() + if u_space == "H1vec": + self._space_key_int = 0 + else: + self._space_key_int = int( + self.derham.space_to_form[u_space], + ) # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) - pc = pc_class(getattr(self.mass_ops, id_M)) + pc_class = getattr(preconditioner, solver["type"][1]) + pc = pc_class(getattr(self.mass_ops, id_Mn)) # Call the accumulation and Pusher class - if self.options.use_perp_model: + if use_perp_model: accum_ker = Pyccelkernel(accum_kernels.pc_lin_mhd_6d) pusher_ker = Pyccelkernel(pusher_kernels.push_pc_GXu) else: accum_ker = Pyccelkernel(accum_kernels.pc_lin_mhd_6d_full) pusher_ker = Pyccelkernel(pusher_kernels.push_pc_GXu_full) - # define Accumulator and arguments + self._coupling_mat = coupling_params["Ah"] / coupling_params["Ab"] + self._coupling_vec = coupling_params["Ah"] / coupling_params["Ab"] + self._scale_push = 1 + + self._boundary_cut_e1 = boundary_cut["e1"] + self._ACC = Accumulator( - self.variables.energetic_ions.particles, - "Hcurl", # TODO:check + particles, + "Hcurl", accum_ker, self.mass_ops, self.domain.args_domain, add_vector=True, symmetry="pressure", - filter_params=self.options.filter_params, + filter_params=filter, ) - self._tmp_g1 = grad.codomain.zeros() - self._tmp_g2 = grad.codomain.zeros() - self._tmp_g3 = grad.codomain.zeros() + self._tmp_g1 = self._G.codomain.zeros() + self._tmp_g2 = self._G.codomain.zeros() + self._tmp_g3 = self._G.codomain.zeros() # instantiate Pusher - args_pusher_kernel = ( + args_kernel = ( self.derham.args_derham, self._tmp_g1[0]._data, self._tmp_g1[1]._data, @@ -688,20 +596,38 @@ def allocate(self): self._tmp_g3[0]._data, self._tmp_g3[1]._data, self._tmp_g3[2]._data, + 
self._boundary_cut_e1, ) self._pusher = Pusher( - self.variables.energetic_ions.particles, + particles, pusher_ker, - args_pusher_kernel, + args_kernel, self.domain.args_domain, alpha_in_kernel=1.0, ) - self.u_temp = self.variables.u.spline.vector.space.zeros() - self.u_temp2 = self.variables.u.spline.vector.space.zeros() + # Define operators + self._A = getattr(self.mass_ops, id_Mn) + self._X = getattr(self.basis_ops, id_X) + self._XT = self._X.transpose() + + # Instantiate schur solver with dummy BC + self._schur_solver = SchurSolver( + self._A, + self._XT @ self._X, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], + ) + + self.u_temp = u.space.zeros() + self.u_temp2 = u.space.zeros() self._tmp = self._X.codomain.zeros() - self._BV = self.variables.u.spline.vector.space.zeros() + self._BV = u.space.zeros() self._MAT = [ [self._ACC.operators[0], self._ACC.operators[1], self._ACC.operators[2]], @@ -711,32 +637,20 @@ def allocate(self): self._GT_VEC = BlockVector(self.derham.Vh["v"]) - _BC = -1 / 4 * self._XT @ self.GT_MAT_G(self.derham, self._MAT) @ self._X - - self._schur_solver = SchurSolver( - _A, - _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, - ) - def __call__(self, dt): - # current FE coeffs - un = self.variables.u.spline.vector - - # operators - grad = self.derham.grad - gradT = grad.transpose() + # current u + un = self.feec_vars[0] + un.update_ghost_regions() # acuumulate MAT and VEC - self._ACC( - self.options.ep_scale, - ) + self._ACC(self._coupling_mat, self._coupling_vec, self._boundary_cut_e1) # update GT_VEC for i in range(3): - self._GT_VEC[i] = gradT.dot(self._ACC.vectors[i]) + self._GT_VEC[i] = self._GT.dot(self._ACC.vectors[i]) + + # define BC and B dot V of the Schur block matrix [[A, B], [C, I]] + self._schur_solver.BC = -1 / 4 * self._XT @ self.GT_MAT_G(self.derham, self._MAT) @ self._X self._BV = self._XT.dot(self._GT_VEC) * (-1 / 2) @@ -749,9 +663,9 @@ def __call__(self, dt): # calculate GXu Xu = self._X.dot(_u, out=self._tmp) - GXu_1 = grad.dot(Xu[0], out=self._tmp_g1) - GXu_2 = grad.dot(Xu[1], out=self._tmp_g2) - GXu_3 = grad.dot(Xu[2], out=self._tmp_g3) + GXu_1 = self._G.dot(Xu[0], out=self._tmp_g1) + GXu_2 = self._G.dot(Xu[1], out=self._tmp_g2) + GXu_3 = self._G.dot(Xu[2], out=self._tmp_g3) GXu_1.update_ghost_regions() GXu_2.update_ghost_regions() @@ -761,16 +675,16 @@ def __call__(self, dt): self._pusher(dt) # write new coeffs into Propagator.variables - diffs = self.update_feec_variables(u=un1) + (max_du,) = self.feec_vars_update(un1) # update weights in case of control variate - if self.variables.energetic_ions.species.weights_params.control_variate: - self.variables.energetic_ions.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() - if self.options.solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: + if self._info and self._rank == 0: print("Status for StepPressurecoupling:", info["success"]) print("Iterations for StepPressurecoupling:", info["niter"]) - print("Maxdiff u1 for StepPressurecoupling:", diffs["u"]) + print("Maxdiff u1 for StepPressurecoupling:", max_du) print() class GT_MAT_G(LinOpWithTransp): @@ -789,8 +703,8 @@ class GT_MAT_G(LinOpWithTransp): def __init__(self, derham, MAT, transposed=False): self._derham = derham - self._grad = derham.grad - self._gradT = derham.grad.transpose() + self._G = derham.grad + self._GT = derham.grad.transpose() self._domain = 
derham.Vh["v"] self._codomain = derham.Vh["v"] @@ -844,9 +758,9 @@ def dot(self, v, out=None): for i in range(3): for j in range(3): - self._temp += self._MAT[i][j].dot(self._grad.dot(v[j])) + self._temp += self._MAT[i][j].dot(self._G.dot(v[j])) - self._vector[i] = self._gradT.dot(self._temp) + self._vector[i] = self._GT.dot(self._temp) self._temp *= 0.0 self._vector.update_ghost_regions() @@ -876,107 +790,88 @@ class CurrentCoupling6DCurrent(Propagator): :ref:`time_discret`: Crank-Nicolson (implicit mid-point). System size reduction via :class:`~struphy.linear_algebra.schur_solver.SchurSolver`. """ - class Variables: - def __init__(self): - self._ions: PICVariable = None - self._u: FEECVariable = None - - @property - def ions(self) -> PICVariable: - return self._ions + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles6D, + u: BlockVector, + *, + u_space: str, + b_eq: BlockVector | PolarVector, + b_tilde: BlockVector | PolarVector, + Ab: int = 1, + Ah: int = 1, + epsilon: float = 1.0, + solver: dict = options(default=True)["solver"], + filter: dict = options(default=True)["filter"], + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(particles, u) + + if u_space == "H1vec": + self._space_key_int = 0 + else: + self._space_key_int = int( + self.derham.space_to_form[u_space], + ) - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable) - assert new.space in ("Particles6D") - self._ions = new + self._b_eq = b_eq + self._b_tilde = b_tilde - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - b_tilde: FEECVariable = None - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - boundary_cut: tuple = (0.0, 0.0, 0.0) - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert self.b_tilde.space == "Hdiv" - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._space_key_int = int(self.derham.space_to_form[self.options.u_space]) - - particles = self.variables.ions.particles - u = self.variables.u.spline.vector - self._b_eq = self.projected_equil.b2 - 
self._b_tilde = self.options.b_tilde.spline.vector - - self._info = self.options.solver_params.info + self._info = solver["info"] if self.derham.comm is None: self._rank = 0 else: self._rank = self.derham.comm.Get_rank() - Ah = self.variables.ions.species.mass_number - Ab = self.variables.u.species.mass_number - epsilon = self.variables.ions.species.equation_params.epsilon - self._coupling_mat = Ah / Ab / epsilon**2 self._coupling_vec = Ah / Ab / epsilon self._scale_push = 1.0 / epsilon - self._boundary_cut_e1 = self.options.boundary_cut[0] + self._boundary_cut_e1 = boundary_cut["e1"] # load accumulator self._accumulator = Accumulator( particles, - self.options.u_space, + u_space, Pyccelkernel(accum_kernels.cc_lin_mhd_6d_2), self.mass_ops, self.domain.args_domain, add_vector=True, symmetry="symm", - filter_params=self.options.filter_params, + filter_params=filter, ) # if self.particles[0].control_variate: @@ -1005,17 +900,17 @@ def allocate(self): # self.particles[0].f0.n, *quad_pts, kind='0', squeeze_out=False, coordinates='logical') # # memory allocation for magnetic field at quadrature points - # self._b_quad1 = xp.zeros_like(self._nuh0_at_quad[0]) - # self._b_quad2 = xp.zeros_like(self._nuh0_at_quad[0]) - # self._b_quad3 = xp.zeros_like(self._nuh0_at_quad[0]) + # self._b_quad1 = np.zeros_like(self._nuh0_at_quad[0]) + # self._b_quad2 = np.zeros_like(self._nuh0_at_quad[0]) + # self._b_quad3 = np.zeros_like(self._nuh0_at_quad[0]) # # memory allocation for (self._b_quad x self._nuh0_at_quad) * self._coupling_vec - # self._vec1 = xp.zeros_like(self._nuh0_at_quad[0]) - # self._vec2 = xp.zeros_like(self._nuh0_at_quad[0]) - # self._vec3 = xp.zeros_like(self._nuh0_at_quad[0]) + # self._vec1 = np.zeros_like(self._nuh0_at_quad[0]) + # self._vec2 = np.zeros_like(self._nuh0_at_quad[0]) + # self._vec3 = np.zeros_like(self._nuh0_at_quad[0]) # FEM spaces and basis extraction operators for u and b - u_id = self.derham.space_to_form[self.options.u_space] + u_id = self.derham.space_to_form[u_space] self._EuT = self.derham.extraction_ops[u_id].transpose() self._EbT = self.derham.extraction_ops["2"].transpose() @@ -1029,15 +924,15 @@ def allocate(self): self._u_avg2 = self._EuT.codomain.zeros() # load particle pusher kernel - if self.options.u_space == "Hcurl": + if u_space == "Hcurl": kernel = Pyccelkernel(pusher_kernels.push_bxu_Hcurl) - elif self.options.u_space == "Hdiv": + elif u_space == "Hdiv": kernel = Pyccelkernel(pusher_kernels.push_bxu_Hdiv) - elif self.options.u_space == "H1vec": + elif u_space == "H1vec": kernel = Pyccelkernel(pusher_kernels.push_bxu_H1vec) else: raise ValueError( - f'{self.options.u_space =} not valid, choose from "Hcurl", "Hdiv" or "H1vec.', + f'{u_space = } not valid, choose from "Hcurl", "Hdiv" or "H1vec.', ) # instantiate Pusher @@ -1064,10 +959,10 @@ def allocate(self): _A = getattr(self.mass_ops, "M" + u_id + "n") # preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(_A) _BC = -1 / 4 * self._accumulator.operators[0] @@ -1075,15 +970,17 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) def __call__(self, dt): # pointer to old coefficients - particles = 
self.variables.ions.particles - un = self.variables.u.spline.vector + un = self.feec_vars[0] # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) self._b_eq.copy(out=self._b_full1) @@ -1153,11 +1050,11 @@ def __call__(self, dt): self._pusher(self._scale_push * dt) # write new coeffs into Propagator.variables - max_du = self.update_feec_variables(u=un1) + max_du = self.feec_vars_update(un1) # update weights in case of control variate - if particles.control_variate: - particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() if self._info and self._rank == 0: print("Status for CurrentCoupling6DCurrent:", info["success"]) @@ -1202,198 +1099,292 @@ class CurrentCoupling5DCurlb(Propagator): For the detail explanation of the notations, see `2022_DriftKineticCurrentCoupling `_. """ - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._energetic_ions: PICVariable = None + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles5D, + u: BlockVector, + *, + b: BlockVector, + b_eq: BlockVector, + unit_b1: BlockVector, + absB0: StencilVector, + gradB1: BlockVector, + curl_unit_b2: BlockVector, + u_space: str, + solver: dict = options(default=True)["solver"], + filter: dict = options(default=True)["filter"], + coupling_params: dict, + epsilon: float = 1.0, + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(particles, u) + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} + + if u_space == "H1vec": + self._space_key_int = 0 + else: + self._space_key_int = int( + self.derham.space_to_form[u_space], + ) - @property - def u(self) -> FEECVariable: - return self._u + self._epsilon = epsilon + self._b = b + self._b_eq = b_eq + self._unit_b1 = unit_b1 + self._absB0 = absB0 + self._gradB1 = gradB1 + self._curl_norm_b = curl_unit_b2 - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new + self._info = solver["info"] - @property - def energetic_ions(self) -> PICVariable: - return self._energetic_ions - - @energetic_ions.setter - def energetic_ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles5D" - self._energetic_ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - b_tilde: FEECVariable = None - ep_scale: float = 1.0 - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert isinstance(self.b_tilde, FEECVariable) - assert isinstance(self.ep_scale, float) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.filter_params is None: - self.filter_params = 
FilterParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.u_space == "H1vec": - self._u_form_int = 0 + if self.derham.comm is None: + self._rank = 0 else: - self._u_form_int = int(self.derham.space_to_form[self.options.u_space]) + self._rank = self.derham.comm.Get_rank() - # call operatros - id_M = "M" + self.derham.space_to_form[self.options.u_space] + "n" - _A = getattr(self.mass_ops, id_M) + self._coupling_mat = coupling_params["Ah"] / coupling_params["Ab"] + self._coupling_vec = coupling_params["Ah"] / coupling_params["Ab"] + self._scale_push = 1 - # Preconditioner - if self.options.precond is None: - pc = None - else: - pc_class = getattr(preconditioner, self.options.precond) - pc = pc_class(getattr(self.mass_ops, id_M)) + self._boundary_cut_e1 = boundary_cut["e1"] - # magnetic equilibrium field - unit_b1 = self.projected_equil.unit_b1 - curl_unit_b1 = self.projected_equil.curl_unit_b1 - self._b2 = self.projected_equil.b2 + u_id = self.derham.space_to_form[u_space] + self._E0T = self.derham.extraction_ops["0"].transpose() + self._EuT = self.derham.extraction_ops[u_id].transpose() + self._E2T = self.derham.extraction_ops["2"].transpose() + self._E1T = self.derham.extraction_ops["1"].transpose() - # magnetic field - self._b_tilde = self.options.b_tilde.spline.vector + self._unit_b1 = self._E1T.dot(self._unit_b1) + self._curl_norm_b = self._E2T.dot(self._curl_norm_b) + self._curl_norm_b.update_ghost_regions() + self._absB0 = self._E0T.dot(self._absB0) - # scaling factor - epsilon = self.variables.energetic_ions.species.equation_params.epsilon + # define system [[A B], [C I]] [u_new, v_new] = [[A -B], [-C I]] [u_old, v_old] (without time step size dt) + _A = getattr(self.mass_ops, "M" + u_id + "n") + + # preconditioner + if solver["type"][1] is None: + pc = None + else: + pc_class = getattr(preconditioner, solver["type"][1]) + pc = pc_class(_A) # temporary vectors to avoid memory allocation - self._b_full = self._b2.space.zeros() - self._u_new = self.variables.u.spline.vector.space.zeros() - self._u_avg = self.variables.u.spline.vector.space.zeros() + self._b_full1 = self._b_eq.space.zeros() + self._b_full2 = self._E2T.codomain.zeros() + self._u_new = u.space.zeros() + self._u_avg1 = u.space.zeros() + self._u_avg2 = self._EuT.codomain.zeros() - # define Accumulator and arguments + # Call the accumulation and Pusher class self._ACC = Accumulator( - self.variables.energetic_ions.particles, - self.options.u_space, - Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_curlb), + particles, + u_space, + Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_J1), self.mass_ops, self.domain.args_domain, add_vector=True, symmetry="symm", - filter_params=self.options.filter_params, + filter_params=filter, ) - self._args_accum_kernel = ( - epsilon, - self.options.ep_scale, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._u_form_int, - ) - - # define Pusher - if self.options.u_space == "Hcurl": - pusher_kernel = 
Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_Hcurl) - elif self.options.u_space == "Hdiv": - pusher_kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_Hdiv) - elif self.options.u_space == "H1vec": - pusher_kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_H1vec) + if u_space == "Hcurl": + kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_Hcurl) + elif u_space == "Hdiv": + kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_Hdiv) + elif u_space == "H1vec": + kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J1_H1vec) else: raise ValueError( - f'{self.options.u_space =} not valid, choose from "Hcurl", "Hdiv" or "H1vec.', + f'{u_space = } not valid, choose from "Hcurl", "Hdiv" or "H1vec.', ) - args_pusher_kernel = ( + # instantiate Pusher + args_kernel = ( self.derham.args_derham, - epsilon, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._u_avg[0]._data, - self._u_avg[1]._data, - self._u_avg[2]._data, + self._epsilon, + self._b_full2[0]._data, + self._b_full2[1]._data, + self._b_full2[2]._data, + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._curl_norm_b[0]._data, + self._curl_norm_b[1]._data, + self._curl_norm_b[2]._data, + self._u_avg2[0]._data, + self._u_avg2[1]._data, + self._u_avg2[2]._data, + 0.0, ) self._pusher = Pusher( - self.variables.energetic_ions.particles, - pusher_kernel, - args_pusher_kernel, + particles, + kernel, + args_kernel, self.domain.args_domain, alpha_in_kernel=1.0, ) + # define BC and B dot V of the Schur block matrix [[A, B], [C, I]] _BC = -1 / 4 * self._ACC.operators[0] + # call SchurSolver class self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) def __call__(self, dt): - # current FE coeffs - un = self.variables.u.spline.vector + un = self.feec_vars[0] # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) - b_full = self._b2.copy(out=self._b_full) + b_full = self._b_eq.copy(out=self._b_full1) + + if self._b is not None: + self._b_full1 += self._b + + # extract coefficients to tensor product space (in-place) + Eb_full = self._E2T.dot(b_full, out=self._b_full2) + + # update ghost regions because of non-local access in accumulation kernel! 
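# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical names, not struphy's API): the SchurSolver
# used by these propagators advances a Crank-Nicolson block system
#     [[A, dt/2*B], [dt/2*C, I]] z^{n+1} = [[A, -dt/2*B], [-dt/2*C, I]] z^n
# by eliminating the second block row, leaving one solve with the Schur
# complement A - dt^2/4 * B@C. Dense NumPy analogue under that assumption:
import numpy as np

def schur_step(A, BC, un, Byn, dt):
    """Return u^{n+1} from (A - dt^2/4 BC) u^{n+1} = (A + dt^2/4 BC) u^n - dt*Byn."""
    lhs = A - dt**2 / 4.0 * BC
    rhs = (A + dt**2 / 4.0 * BC) @ un - dt * Byn
    return np.linalg.solve(lhs, rhs)

A = np.eye(3)         # stand-in for the weighted mass matrix (e.g. M1)
BC = 0.1 * np.eye(3)  # stand-in for the accumulated coupling operator B@C
print(schur_step(A, BC, np.ones(3), np.zeros(3), 0.05))
# ---------------------------------------------------------------------------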
+ Eb_full.update_ghost_regions() + + # perform accumulation (either with or without control variate) + # if self.particles[0].control_variate: - b_full += self._b_tilde - b_full.update_ghost_regions() + # # evaluate magnetic field at quadrature points (in-place) + # WeightedMassOperator.eval_quad(self.derham.Vh_fem['2'], self._b_full2, + # out=[self._b_at_quad[0], self._b_at_quad[1], self._b_at_quad[2]]) + + # # evaluate B_parallel + # self._B_para_at_quad = np.sum( + # p * q for p, q in zip(self._unit_b1_at_quad, self._b_at_quad)) + # self._B_para_at_quad += self._unit_b1_dot_curl_norm_b_at_quad + + # # assemble (B x)(curl norm_b)(curl norm_b)(B x) / B_star_para² / det_df³ * (f0.u_para² + f0.vth_para²) * f0.n + # self._mat11[:, :, :] = (self._b_at_quad[1]*self._curl_norm_b_at_quad[2] - + # self._b_at_quad[2]*self._curl_norm_b_at_quad[1])**2 * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + # self._mat12[:, :, :] = (self._b_at_quad[1]*self._curl_norm_b_at_quad[2] - + # self._b_at_quad[2]*self._curl_norm_b_at_quad[1]) * \ + # (self._b_at_quad[2]*self._curl_norm_b_at_quad[0] - + # self._b_at_quad[0]*self._curl_norm_b_at_quad[2]) * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + # self._mat13[:, :, :] = (self._b_at_quad[1]*self._curl_norm_b_at_quad[2] - + # self._b_at_quad[2]*self._curl_norm_b_at_quad[1]) * \ + # (self._b_at_quad[0]*self._curl_norm_b_at_quad[1] - + # self._b_at_quad[1]*self._curl_norm_b_at_quad[0]) * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + # self._mat22[:, :, :] = (self._b_at_quad[2]*self._curl_norm_b_at_quad[0] - + # self._b_at_quad[0]*self._curl_norm_b_at_quad[2])**2 * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + # self._mat23[:, :, :] = (self._b_at_quad[2]*self._curl_norm_b_at_quad[0] - + # self._b_at_quad[0]*self._curl_norm_b_at_quad[2]) * \ + # (self._b_at_quad[0]*self._curl_norm_b_at_quad[1] - + # self._b_at_quad[1]*self._curl_norm_b_at_quad[0]) * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + # self._mat33[:, :, :] = (self._b_at_quad[0]*self._curl_norm_b_at_quad[1] - + # self._b_at_quad[1]*self._curl_norm_b_at_quad[0])**2 * \ + # self._control_const * self._coupling_mat / \ + # self._det_df_at_quad**3 / self._B_para_at_quad**2 + + # self._mat21[:, :, :] = -self._mat12 + # self._mat31[:, :, :] = -self._mat13 + # self._mat32[:, :, :] = -self._mat23 + + # # assemble (B x)(curl norm_b) / B_star_para / det_df * (f0.u_para² + f0.vth_para²) * f0.n + # self._vec1[:, :, :] = (self._b_at_quad[1]*self._curl_norm_b_at_quad[2] - + # self._b_at_quad[2]*self._curl_norm_b_at_quad[1]) * \ + # self._control_const * self._coupling_vec / \ + # self._det_df_at_quad / self._B_para_at_quad + # self._vec2[:, :, :] = (self._b_at_quad[2]*self._curl_norm_b_at_quad[0] - + # self._b_at_quad[0]*self._curl_norm_b_at_quad[2]) * \ + # self._control_const * self._coupling_vec / \ + # self._det_df_at_quad / self._B_para_at_quad + # self._vec3[:, :, :] = (self._b_at_quad[0]*self._curl_norm_b_at_quad[1] - + # self._b_at_quad[1]*self._curl_norm_b_at_quad[0]) * \ + # self._control_const * self._coupling_vec / \ + # self._det_df_at_quad / self._B_para_at_quad + + # self._ACC.accumulate(self.particles[0], self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, 
self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # self._space_key_int, self._coupling_mat, self._coupling_vec, 0.1, + # control_mat=[[None, self._mat12, self._mat13], + # [self._mat21, None, self._mat23], + # [self._mat31, self._mat32, None]], + # control_vec=[self._vec1, self._vec2, self._vec3]) + # else: + # self._ACC.accumulate(self.particles[0], self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # self._space_key_int, self._coupling_mat, self._coupling_vec, 0.1) self._ACC( - *self._args_accum_kernel, + self._epsilon, + Eb_full[0]._data, + Eb_full[1]._data, + Eb_full[2]._data, + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._curl_norm_b[0]._data, + self._curl_norm_b[1]._data, + self._curl_norm_b[2]._data, + self._space_key_int, + self._coupling_mat, + self._coupling_vec, + self._boundary_cut_e1, ) - # solve + # update u coefficients un1, info = self._schur_solver( un, -self._ACC.vectors[0] / 2, @@ -1402,25 +1393,27 @@ def __call__(self, dt): ) # call pusher kernel with average field (u_new + u_old)/2 and update ghost regions because of non-local access in kernel - _u = un.copy(out=self._u_avg) + _u = un.copy(out=self._u_avg1) _u += un1 _u *= 0.5 - _u.update_ghost_regions() + _Eu = self._EuT.dot(_u, out=self._u_avg2) - self._pusher(dt) + _Eu.update_ghost_regions() - # update u coefficients - diffs = self.update_feec_variables(u=un1) + self._pusher(self._scale_push * dt) + + # write new coeffs into Propagator.variables + (max_du,) = self.feec_vars_update(un1) # update_weights - if self.variables.energetic_ions.species.weights_params.control_variate: - self.variables.energetic_ions.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() - if self.options.solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: + if self._info and self._rank == 0: print("Status for CurrentCoupling5DCurlb:", info["success"]) print("Iterations for CurrentCoupling5DCurlb:", info["niter"]) - print("Maxdiff up for CurrentCoupling5DCurlb:", diffs["u"]) + print("Maxdiff up for CurrentCoupling5DCurlb:", max_du) print() @@ -1460,722 +1453,440 @@ class CurrentCoupling5DGradB(Propagator): For the detail explanation of the notations, see `2022_DriftKineticCurrentCoupling `_. 
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._energetic_ions: PICVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def energetic_ions(self) -> PICVariable: - return self._energetic_ions - - @energetic_ions.setter - def energetic_ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles5D" - self._energetic_ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal[ - "discrete_gradient", - "explicit", - ] - # propagator options - b_tilde: FEECVariable = None - ep_scale: float = 1.0 - algo: OptsAlgo = "explicit" - butcher: ButcherTableau = None - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - dg_solver_params: DiscreteGradientSolverParameters = None - - def __post_init__(self): - # checks - check_option(self.algo, self.OptsAlgo) - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert isinstance(self.b_tilde, FEECVariable) - assert isinstance(self.ep_scale, float) - - # defaults - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() - - if self.algo == "discrete_gradient" and self.dg_solver_params is None: - self.dg_solver_params = DiscreteGradientSolverParameters() - - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.filter_params is None: - self.filter_params = FilterParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.u_space == "H1vec": - self._u_form_int = 0 + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["algo"] = ["rk4", "forward_euler", "heun2", "rk2", "heun3"] + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles5D, + u: BlockVector, + *, + b: BlockVector, + b_eq: BlockVector, + unit_b1: BlockVector, + unit_b2: BlockVector, + absB0: StencilVector, + gradB1: BlockVector, + curl_unit_b2: BlockVector, + u_space: str, + solver: dict = options(default=True)["solver"], + algo: dict = options(default=True)["algo"], + filter: dict = options(default=True)["filter"], + coupling_params: dict, + epsilon: float = 1.0, + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + from psydac.linalg.solvers import inverse + + from struphy.ode.utils import ButcherTableau + + 
super().__init__(particles, u) + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} + + if u_space == "H1vec": + self._space_key_int = 0 else: - self._u_form_int = int(self.derham.space_to_form[self.options.u_space]) - - # call operatros - id_M = "M" + self.derham.space_to_form[self.options.u_space] + "n" - self._A = getattr(self.mass_ops, id_M) - self._PB = getattr(self.basis_ops, "PB") - - # Preconditioner - if self.options.precond is None: - pc = None - else: - pc_class = getattr(preconditioner, self.options.precond) - pc = pc_class(getattr(self.mass_ops, id_M)) - - # linear solver - self._A_inv = inverse( - self._A, - self.options.solver, - pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, - ) - # magnetic equilibrium field - unit_b1 = self.projected_equil.unit_b1 - curl_unit_b1 = self.projected_equil.curl_unit_b1 - self._b2 = self.projected_equil.b2 - gradB1 = self.projected_equil.gradB1 - absB0 = self.projected_equil.absB0 - - # magnetic field - self._b_tilde = self.options.b_tilde.spline.vector - - # scaling factor - epsilon = self.variables.energetic_ions.species.equation_params.epsilon - - if self.options.algo == "explicit": - # temporary vectors to avoid memory allocation - self._b_full = self._b2.space.zeros() - self._u_new = self.variables.u.spline.vector.space.zeros() - self._u_temp = self.variables.u.spline.vector.space.zeros() - self._ku = self.variables.u.spline.vector.space.zeros() - self._PB_b = self._PB.codomain.zeros() - self._grad_PB_b = self.derham.grad.codomain.zeros() - - # define Accumulator and arguments - self._ACC = Accumulator( - self.variables.energetic_ions.particles, - self.options.u_space, - Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_gradB), - self.mass_ops, - self.domain.args_domain, - add_vector=True, - symmetry="symm", - filter_params=self.options.filter_params, - ) - - self._args_accum_kernel = ( - epsilon, - self.options.ep_scale, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._grad_PB_b[0]._data, - self._grad_PB_b[1]._data, - self._grad_PB_b[2]._data, - self._u_form_int, + self._space_key_int = int( + self.derham.space_to_form[u_space], ) - # define Pusher - if self.options.u_space == "Hdiv": - self._pusher_kernel = pusher_kernels_gc.push_gc_cc_J2_stage_Hdiv - elif self.options.u_space == "H1vec": - self._pusher_kernel = pusher_kernels_gc.push_gc_cc_J2_stage_H1vec - else: - raise ValueError( - f'{self.options.u_space =} not valid, choose from "Hdiv" or "H1vec.', - ) - - # temp fix due to refactoring of ButcherTableau: - butcher = self.options.butcher - import numpy as np + self._epsilon = epsilon + self._b = b + self._b_eq = b_eq + self._unit_b1 = unit_b1 + self._unit_b2 = unit_b2 + self._absB0 = absB0 + self._gradB1 = gradB1 + self._curl_norm_b = curl_unit_b2 - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher.a) + [0.0]) - - self._args_pusher_kernel = ( - self.domain.args_domain, - self.derham.args_derham, - epsilon, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._u_temp[0]._data, - self._u_temp[1]._data, - self._u_temp[2]._data, - self.options.butcher.a, - self.options.butcher.b, - self.options.butcher.c, - 
) + self._info = solver["info"] + if self.derham.comm is None: + self._rank = 0 else: - # temporary vectors to avoid memory allocation - self._b_full = self._b2.space.zeros() - self._PB_b = self._PB.codomain.zeros() - self._grad_PB_b = self.derham.grad.codomain.zeros() - self._u_old = self.variables.u.spline.vector.space.zeros() - self._u_new = self.variables.u.spline.vector.space.zeros() - self._u_diff = self.variables.u.spline.vector.space.zeros() - self._u_mid = self.variables.u.spline.vector.space.zeros() - self._M2n_dot_u = self.variables.u.spline.vector.space.zeros() - self._ku = self.variables.u.spline.vector.space.zeros() - self._u_temp = self.variables.u.spline.vector.space.zeros() - - # Call the accumulation and Pusher class - accum_kernel_init = accum_kernels_gc.cc_lin_mhd_5d_gradB_dg_init - accum_kernel = accum_kernels_gc.cc_lin_mhd_5d_gradB_dg - self._accum_kernel_en_fB_mid = utilities_kernels.eval_gradB_ediff - - self._args_accum_kernel = ( - epsilon, - self.options.ep_scale, - self._b_tilde[0]._data, - self._b_tilde[1]._data, - self._b_tilde[2]._data, - self._b2[0]._data, - self._b2[1]._data, - self._b2[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._grad_PB_b[0]._data, - self._grad_PB_b[1]._data, - self._grad_PB_b[2]._data, - gradB1[0]._data, - gradB1[1]._data, - gradB1[2]._data, - self._u_form_int, - ) + self._rank = self.derham.comm.Get_rank() - self._args_accum_kernel_en_fB_mid = ( - self.domain.args_domain, - self.derham.args_derham, - gradB1[0]._data, - gradB1[1]._data, - gradB1[2]._data, - self._grad_PB_b[0]._data, - self._grad_PB_b[1]._data, - self._grad_PB_b[2]._data, - ) + self._coupling_mat = coupling_params["Ah"] / coupling_params["Ab"] + self._coupling_vec = coupling_params["Ah"] / coupling_params["Ab"] + self._scale_push = 1 - self._ACC_init = AccumulatorVector( - self.variables.energetic_ions.particles, - self.options.u_space, - accum_kernel_init, - self.mass_ops, - self.domain.args_domain, - filter_params=self.options.filter_params, - ) + self._boundary_cut_e1 = boundary_cut["e1"] - self._ACC = AccumulatorVector( - self.variables.energetic_ions.particles, - self.options.u_space, - accum_kernel, - self.mass_ops, - self.domain.args_domain, - filter_params=self.options.filter_params, - ) + u_id = self.derham.space_to_form[u_space] + self._E0T = self.derham.extraction_ops["0"].transpose() + self._EuT = self.derham.extraction_ops[u_id].transpose() + self._E1T = self.derham.extraction_ops["1"].transpose() + self._E2T = self.derham.extraction_ops["2"].transpose() - self._args_pusher_kernel_init = ( - self.domain.args_domain, - self.derham.args_derham, - epsilon, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self.variables.u.spline.vector[0]._data, - self.variables.u.spline.vector[1]._data, - self.variables.u.spline.vector[2]._data, - ) + self._PB = getattr(self.basis_ops, "PB") - self._args_pusher_kernel = ( - self.domain.args_domain, - self.derham.args_derham, - epsilon, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._u_mid[0]._data, - self._u_mid[1]._data, - self._u_mid[2]._data, - self._u_temp[0]._data, - self._u_temp[1]._data, - 
self._u_temp[2]._data, - ) + self._unit_b1 = self._E1T.dot(self._unit_b1) + self._unit_b2 = self._E2T.dot(self._unit_b2) + self._curl_norm_b = self._E2T.dot(self._curl_norm_b) + self._absB0 = self._E0T.dot(self._absB0) - self._pusher_kernel_init = pusher_kernels_gc.push_gc_cc_J2_dg_init_Hdiv - self._pusher_kernel = pusher_kernels_gc.push_gc_cc_J2_dg_Hdiv + _A = getattr(self.mass_ops, "M" + u_id + "n") - def __call__(self, dt): - # current FE coeffs - un = self.variables.u.spline.vector + # preconditioner + if solver["type"][1] is None: + pc = None + else: + pc_class = getattr(preconditioner, solver["type"][1]) + pc = pc_class(_A) - # particle markers and idx - particles = self.variables.energetic_ions.particles - holes = particles.holes - args_markers = particles.args_markers - markers = args_markers.markers - first_init_idx = args_markers.first_init_idx - first_free_idx = args_markers.first_free_idx + self._solver = inverse( + _A, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], + ) - # clear buffer - markers[:, first_init_idx:-2] = 0.0 + # Call the accumulation and Pusher class + self._ACC = Accumulator( + particles, + u_space, + Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_J2), + self.mass_ops, + self.domain.args_domain, + add_vector=True, + symmetry="symm", + filter_params=filter, + ) - # save old marker positions - markers[:, first_init_idx : first_init_idx + 3] = markers[:, :3] + # if self.particles[0].control_variate: - # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) - b_full = self._b2.copy(out=self._b_full) + # # control variate method is only valid with Maxwellian distributions + # assert isinstance(self.particles[0].f0, Maxwellian) + # assert params['u_space'] == 'Hdiv' - b_full += self._b_tilde - b_full.update_ghost_regions() + # self._ACC.init_control_variate(self.mass_ops) - if self.options.algo == "explicit": - PB_b = self._PB.dot(b_full, out=self._PB_b) - grad_PB_b = self.derham.grad.dot(PB_b, out=self._grad_PB_b) - grad_PB_b.update_ghost_regions() + # # evaluate and save n0 at quadrature points + # quad_pts = [quad_grid[nquad].points.flatten() + # for quad_grid, nquad in zip(self.derham.get_quad_grids(self.derham.Vh_fem['0']), self.derham.nquads)] - # save old u - u_new = un.copy(out=self._u_new) + # self._n0_at_quad = self.domain.push( + # self.particles[0].f0.n, *quad_pts, kind='0', squeeze_out=False) - for stage in range(self.options.butcher.n_stages): - # accumulate - self._ACC( - *self._args_accum_kernel, - ) + # # evaluate unit_b1 (1form) dot epsilon * u0_parallel * curl_norm_b/|det(DF)| at quadrature points + # quad_pts_array = self.domain.prepare_eval_pts(*quad_pts)[:3] - # push particles - self._pusher_kernel( - dt, - stage, - args_markers, - *self._args_pusher_kernel, - ) + # u0_parallel_at_quad = self.particles[0].f0.u( + # *quad_pts_array)[0] - if particles.mpi_comm is not None: - particles.mpi_sort_markers() - else: - particles.apply_kinetic_bc() + # vth_perp = self.particles[0].f0.vth(*quad_pts_array)[1] - # solve linear system for updating u coefficients - ku = self._A_inv.dot(self._ACC.vectors[0], out=self._ku) - info = self._A_inv._info + # absB0_at_quad = WeightedMassOperator.eval_quad( + # self.derham.Vh_fem['0'], self._absB0) - # calculate u^{n+1}_k - u_temp = un.copy(out=self._u_temp) - u_temp += ku * dt * self.options.butcher.a[stage] + # self._det_df_at_quad = self.domain.jacobian_det( + # *quad_pts, squeeze_out=False) - 
u_temp.update_ghost_regions() + # self._unit_b1_at_quad = WeightedMassOperator.eval_quad( + # self.derham.Vh_fem['1'], self._unit_b1) - # calculate u^{n+1} - u_new += ku * dt * self.options.butcher.b[stage] + # curl_norm_b_at_quad = WeightedMassOperator.eval_quad( + # self.derham.Vh_fem['2'], self._curl_norm_b) - if self.options.solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: - print("Stage: ", stage) - print("Status for CurrentCoupling5DGradB:", info["success"]) - print("Iterations for CurrentCoupling5DGradB:", info["niter"]) - print() + # self._unit_b1_dot_curl_norm_b_at_quad = np.sum( + # p * q for p, q in zip(self._unit_b1_at_quad, curl_norm_b_at_quad)) - # update u coefficients - diffs = self.update_feec_variables(u=u_new) + # self._unit_b1_dot_curl_norm_b_at_quad /= self._det_df_at_quad + # self._unit_b1_dot_curl_norm_b_at_quad *= self._epsilon + # self._unit_b1_dot_curl_norm_b_at_quad *= u0_parallel_at_quad - # clear the buffer - markers[:, first_init_idx:-2] = 0.0 + # # precalculate constant 2 * f0.vth_perp² / B0 * f0.n for control MAT and VEC + # self._control_const = vth_perp**2 / absB0_at_quad * self._n0_at_quad - # update_weights - if self.variables.energetic_ions.species.weights_params.control_variate: - particles.update_weights() + # # assemble the matrix (G_inv)(unit_b1 x)(G_inv) + # G_inv_at_quad = self.domain.metric_inv( + # *quad_pts, squeeze_out=False) - if self.options.solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: - print("Maxdiff up for CurrentCoupling5DGradB:", diffs["u"]) - print() + # self._G_inv_bx_G_inv_at_quad = [[np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad)], + # [np.zeros_like(self._n0_at_quad), np.zeros_like( + # self._n0_at_quad), np.zeros_like(self._n0_at_quad)], + # [np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad)]] - else: - # total number of markers - n_mks_tot = particles.Np - - # relaxation factor - alpha = self.options.dg_solver_params.relaxation_factor - - # eval parallel tilde b and its gradient - PB_b = self._PB.dot(self._b_tilde, out=self._PB_b) - PB_b.update_ghost_regions() - grad_PB_b = self.derham.grad.dot(PB_b, out=self._grad_PB_b) - grad_PB_b.update_ghost_regions() - - # save old u - u_old = un.copy(out=self._u_old) - u_new = un.copy(out=self._u_new) - - # save en_U_old - self._A.dot(un, out=self._M2n_dot_u) - en_U_old = un.inner(self._M2n_dot_u) / 2.0 - - # save en_fB_old - particles.save_magnetic_energy(PB_b) - en_fB_old = xp.sum(markers[~holes, 8].dot(markers[~holes, 5])) * self.options.ep_scale - en_fB_old /= n_mks_tot - - buffer_array = xp.array([en_fB_old]) - - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + # for j in range(3): + # temp = (-self._unit_b1_at_quad[2]*G_inv_at_quad[1, j] + self._unit_b1_at_quad[1]*G_inv_at_quad[2, j], + # self._unit_b1_at_quad[2]*G_inv_at_quad[0, j] - + # self._unit_b1_at_quad[0]*G_inv_at_quad[2, j], + # -self._unit_b1_at_quad[1]*G_inv_at_quad[0, j] + self._unit_b1_at_quad[0]*G_inv_at_quad[1, j]) - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + # for i in range(3): + # self._G_inv_bx_G_inv_at_quad[i][j] = np.sum( + # p * q for p, q in zip(G_inv_at_quad[i], temp[:])) - en_fB_old = buffer_array[0] - en_tot_old = en_U_old + en_fB_old + # # memory allocation of magnetic field at quadrature points + # self._b_at_quad = 
[np.zeros_like(self._n0_at_quad), + # np.zeros_like(self._n0_at_quad), + # np.zeros_like(self._n0_at_quad)] - # initial guess - self._ACC_init(*self._args_accum_kernel) + # # memory allocation of parallel magnetic field at quadrature points + # self._B_para_at_quad = np.zeros_like(self._n0_at_quad) - ku = self._A_inv.dot(self._ACC_init.vectors[0], out=self._ku) - u_new += ku * dt + # # memory allocation of gradient of parallel magnetic field at quadrature points + # self._grad_PBb_at_quad = (np.zeros_like(self._n0_at_quad), + # np.zeros_like(self._n0_at_quad), + # np.zeros_like(self._n0_at_quad)) + # # memory allocation for temporary matrix + # self._temp = [[np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad)], + # [np.zeros_like(self._n0_at_quad), np.zeros_like( + # self._n0_at_quad), np.zeros_like(self._n0_at_quad)], + # [np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad), np.zeros_like(self._n0_at_quad)]] - u_new.update_ghost_regions() + # # memory allocation for control VEC + # self._vec1 = np.zeros_like(self._n0_at_quad) + # self._vec2 = np.zeros_like(self._n0_at_quad) + # self._vec3 = np.zeros_like(self._n0_at_quad) - # save en_U_new - self._A.dot(u_new, out=self._M2n_dot_u) - en_U_new = u_new.inner(self._M2n_dot_u) / 2.0 + # choose algorithm + self._butcher = ButcherTableau(algo) + # temp fix due to refactoring of ButcherTableau: + self._butcher._a = np.diag(self._butcher.a, k=-1) + self._butcher._a = np.array(list(self._butcher.a) + [0.0]) - # push eta - self._pusher_kernel_init( - dt, - args_markers, - *self._args_pusher_kernel_init, + # instantiate Pusher + if u_space == "Hdiv": + kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J2_stage_Hdiv) + elif u_space == "H1vec": + kernel = Pyccelkernel(pusher_kernels_gc.push_gc_cc_J2_stage_H1vec) + else: + raise ValueError( + f'{u_space = } not valid, choose from "Hdiv" or "H1vec.', ) - if particles.mpi_comm is not None: - particles.mpi_sort_markers(apply_bc=False) - - # save en_fB_new - particles.save_magnetic_energy(PB_b) - en_fB_new = xp.sum(markers[~holes, 8].dot(markers[~holes, 5])) * self.options.ep_scale - en_fB_new /= n_mks_tot - - buffer_array = xp.array([en_fB_new]) - - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - en_fB_new = buffer_array[0] - - # fixed-point iterations - iter_num = 0 - - while True: - iter_num += 1 - - if self.options.dg_solver_params.verbose and MPI.COMM_WORLD.Get_rank() == 0: - print("# of iteration: ", iter_num) - - # calculate discrete gradient - # save u^{n+1, k} - u_old = u_new.copy(out=self._u_old) - - u_diff = u_old.copy(out=self._u_diff) - u_diff -= un - u_diff.update_ghost_regions() - - u_mid = u_old.copy(out=self._u_mid) - u_mid += un - u_mid /= 2.0 - u_mid.update_ghost_regions() - - # save H^{n+1, k} - markers[~holes, first_free_idx : first_free_idx + 3] = markers[~holes, 0:3] - - # calculate denominator ||z^{n+1, k} - z^n||^2 - sum_u_diff_loc = xp.sum((u_diff.toarray() ** 2)) - - sum_H_diff_loc = xp.sum( - (markers[~holes, :3] - markers[~holes, first_init_idx : first_init_idx + 3]) ** 2, - ) - - buffer_array = xp.array([sum_u_diff_loc]) - - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - denominator = buffer_array[0] + args_kernel = 
(self.derham.args_derham,) - buffer_array = xp.array([sum_H_diff_loc]) + self._pusher = Pusher( + particles, + kernel, + args_kernel, + self.domain.args_domain, + alpha_in_kernel=1.0, + ) - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + # temporary vectors to avoid memory allocation + self._b_full1 = self._b_eq.space.zeros() + self._b_full2 = self._E2T.codomain.zeros() + self._u_new = u.space.zeros() + self._Eu_new = self._EuT.codomain.zeros() + self._u_temp1 = u.space.zeros() + self._u_temp2 = u.space.zeros() + self._Eu_temp = self._EuT.codomain.zeros() + self._tmp1 = self._E0T.codomain.zeros() + self._tmp2 = self._gradB1.space.zeros() + self._tmp3 = self._E1T.codomain.zeros() - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + def __call__(self, dt): + un = self.feec_vars[0] - denominator += buffer_array[0] + # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) + b_full = self._b_eq.copy(out=self._b_full1) - # sorting markers at mid-point - if particles.mpi_comm is not None: - particles.mpi_sort_markers(apply_bc=False, alpha=0.5) + if self._b is not None: + self._b_full1 += self._b - self._accum_kernel_en_fB_mid( - args_markers, - *self._args_accum_kernel_en_fB_mid, - first_free_idx + 3, - ) - en_fB_mid = xp.sum(markers[~holes, first_free_idx + 3].dot(markers[~holes, 5])) * self.options.ep_scale + PBb = self._PB.dot(self._b, out=self._tmp1) + grad_PBb = self.derham.grad.dot(PBb, out=self._tmp2) + grad_PBb += self._gradB1 - en_fB_mid /= n_mks_tot + Eb_full = self._E2T.dot(b_full, out=self._b_full2) + Eb_full.update_ghost_regions() - buffer_array = xp.array([en_fB_mid]) + Egrad_PBb = self._E1T.dot(grad_PBb, out=self._tmp3) + Egrad_PBb.update_ghost_regions() - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + # perform accumulation (either with or without control variate) + # if self.particles[0].control_variate: - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) + # # evaluate magnetic field at quadrature points (in-place) + # WeightedMassOperator.eval_quad(self.derham.Vh_fem['2'], self._b_full2, + # out=[self._b_at_quad[0], self._b_at_quad[1], self._b_at_quad[2]]) + + # # evaluate B_parallel + # self._B_para_at_quad = np.sum( + # p * q for p, q in zip(self._unit_b1_at_quad, self._b_at_quad)) + # self._B_para_at_quad += self._unit_b1_dot_curl_norm_b_at_quad + + # # evaluate grad B_parallel + # WeightedMassOperator.eval_quad(self.derham.Vh_fem['1'], self._tmp3, + # out=[self._grad_PBb_at_quad[0], self._grad_PBb_at_quad[1], self._grad_PBb_at_quad[2]]) + + # # assemble temp = (B x)(G_inv)(unit_b1 x)(G_inv) + # for i in range(3): + # self._temp[0][i] = -self._b_at_quad[2]*self._G_inv_bx_G_inv_at_quad[1][i] + \ + # self._b_at_quad[1]*self._G_inv_bx_G_inv_at_quad[2][i] + # self._temp[1][i] = +self._b_at_quad[2]*self._G_inv_bx_G_inv_at_quad[0][i] - \ + # self._b_at_quad[0]*self._G_inv_bx_G_inv_at_quad[2][i] + # self._temp[2][i] = -self._b_at_quad[1]*self._G_inv_bx_G_inv_at_quad[0][i] + \ + # self._b_at_quad[0]*self._G_inv_bx_G_inv_at_quad[1][i] + + # # assemble (temp)(grad B_parallel) / B_star_para * 2 * f0.vth_perp² / B0 * f0.n + # self._vec1[:, :, :] = np.sum(p * q for p, q in zip(self._temp[0][:], self._grad_PBb_at_quad)) * \ + # self._control_const * self._coupling_vec 
/ self._B_para_at_quad + # self._vec2[:, :, :] = np.sum(p * q for p, q in zip(self._temp[1][:], self._grad_PBb_at_quad)) * \ + # self._control_const * self._coupling_vec / self._B_para_at_quad + # self._vec3[:, :, :] = np.sum(p * q for p, q in zip(self._temp[2][:], self._grad_PBb_at_quad)) * \ + # self._control_const * self._coupling_vec / self._B_para_at_quad + + # save old u + _u_new = un.copy(out=self._u_new) + _u_temp = un.copy(out=self._u_temp1) - en_fB_mid = buffer_array[0] + # save old marker positions + self.particles[0].markers[ + ~self.particles[0].holes, + 11:14, + ] = self.particles[0].markers[~self.particles[0].holes, 0:3] + + for stage in range(self._butcher.n_stages): + # accumulate RHS + # if self.particles[0].control_variate: + # self._ACC.accumulate(self.particles[0], self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._unit_b2[0]._data, self._unit_b2[1]._data, self._unit_b2[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # Egrad_PBb[0]._data, Egrad_PBb[1]._data, Egrad_PBb[2]._data, + # self._space_key_int, self._coupling_mat, self._coupling_vec, 0., + # control_vec=[self._vec1, self._vec2, self._vec3]) + # else: + # self._ACC.accumulate(self.particles[0], self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._unit_b2[0]._data, self._unit_b2[1]._data, self._unit_b2[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # Egrad_PBb[0]._data, Egrad_PBb[1]._data, Egrad_PBb[2]._data, + # self._space_key_int, self._coupling_mat, self._coupling_vec, 0.) 
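+ # Each stage below repeats the same three steps: (i) accumulate the current-coupling + # right-hand side from the markers via self._ACC, (ii) solve the mass-matrix system + # for the stage slope _ku, (iii) advance the stage vector _u_temp by dt * a[stage] * _ku + # and the running solution _u_new by dt * b[stage] * _ku.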
+ + self._ACC( + self._epsilon, + Eb_full[0]._data, + Eb_full[1]._data, + Eb_full[2]._data, + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._unit_b2[0]._data, + self._unit_b2[1]._data, + self._unit_b2[2]._data, + self._curl_norm_b[0]._data, + self._curl_norm_b[1]._data, + self._curl_norm_b[2]._data, + Egrad_PBb[0]._data, + Egrad_PBb[1]._data, + Egrad_PBb[2]._data, + self._space_key_int, + self._coupling_mat, + self._coupling_vec, + self._boundary_cut_e1, + ) - if denominator == 0.0: - const = 0.0 - else: - const = (en_fB_new - en_fB_old - en_fB_mid) / denominator + # push particles + Eu = self._EuT.dot(_u_temp, out=self._Eu_temp) + Eu.update_ghost_regions() - # update u^{n+1, k} - self._ACC(*self._args_accum_kernel, const) + self._pusher.kernel( + dt, + stage, + self.particles[0].args_markers, + self.domain.args_domain, + self.derham.args_derham, + self._epsilon, + Eb_full[0]._data, + Eb_full[1]._data, + Eb_full[2]._data, + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._unit_b2[0]._data, + self._unit_b2[1]._data, + self._unit_b2[2]._data, + self._curl_norm_b[0]._data, + self._curl_norm_b[1]._data, + self._curl_norm_b[2]._data, + Eu[0]._data, + Eu[1]._data, + Eu[2]._data, + self._butcher.a, + self._butcher.b, + self._butcher.c, + self._boundary_cut_e1, + ) - ku = self._A_inv.dot(self._ACC.vectors[0], out=self._ku) + if self.particles[0].mpi_comm is not None: + self.particles[0].mpi_sort_markers() - u_new = un.copy(out=self._u_new) - u_new += ku * dt - u_new *= alpha - u_new += u_old * (1.0 - alpha) + # solve linear system for updated u coefficients + _ku = self._solver.dot(self._ACC.vectors[0], out=self._u_temp2) - u_new.update_ghost_regions() + # calculate u^{n+1}_k + _u_temp = un.copy(out=self._u_temp1) + _u_temp += _ku * dt * self._butcher.a[stage] - # update en_U_new - self._A.dot(u_new, out=self._M2n_dot_u) - en_U_new = u_new.inner(self._M2n_dot_u) / 2.0 + # calculate u^{n+1} + _u_new += _ku * dt * self._butcher.b[stage] - # update H^{n+1, k} - self._pusher_kernel( - dt, - args_markers, - *self._args_pusher_kernel, - const, - alpha, + if self._info and self._rank == 0: + print("Stage:", stage) + print( + "Status for CurrentCoupling5DGradB:", + self._solver._info["success"], ) - - sum_H_diff_loc = xp.sum( - xp.abs(markers[~holes, 0:3] - markers[~holes, first_free_idx : first_free_idx + 3]), + print( + "Iterations for CurrentCoupling5DGradB:", + self._solver._info["niter"], ) - if particles.mpi_comm is not None: - particles.mpi_sort_markers(apply_bc=False) - - # update en_fB_new - particles.save_magnetic_energy(PB_b) - en_fB_new = xp.sum(markers[~holes, 8].dot(markers[~holes, 5])) * self.options.ep_scale - en_fB_new /= n_mks_tot - - buffer_array = xp.array([en_fB_new]) - - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - en_fB_new = buffer_array[0] - - # calculate total energy difference - e_diff = xp.abs(en_U_new + en_fB_new - en_tot_old) - - # calculate ||z^{n+1, k} - z^{n+1, k-1|| - sum_u_diff_loc = xp.sum(xp.abs(u_new.toarray() - u_old.toarray())) - - buffer_array = xp.array([sum_u_diff_loc]) - - if particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - diff = buffer_array[0] - - buffer_array = xp.array([sum_H_diff_loc]) - - if 
particles.mpi_comm is not None: - particles.mpi_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - if particles.clone_config is not None: - particles.clone_config.inter_comm.Allreduce( - MPI.IN_PLACE, - buffer_array, - op=MPI.SUM, - ) - - diff += buffer_array[0] - - # check convergence - if diff < self.options.dg_solver_params.tol: - if self.options.dg_solver_params.verbose and MPI.COMM_WORLD.Get_rank() == 0: - print("converged diff: ", diff) - print("converged e_diff: ", e_diff) - - if particles.mpi_comm is not None: - particles.mpi_comm.Barrier() - break - - else: - if self.options.dg_solver_params.verbose and MPI.COMM_WORLD.Get_rank() == 0: - print("not converged diff: ", diff) - print("not converged e_diff: ", e_diff) - - if iter_num == self.options.dg_solver_params.maxiter: - if self.options.dg_solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: - print( - f"{iter_num =}, maxiter={self.options.dg_solver_params.maxiter} reached! diff: {diff}, e_diff: {e_diff}", - ) - if particles.mpi_comm is not None: - particles.mpi_comm.Barrier() - break - - # sorting markers - if particles.mpi_comm is not None: - particles.mpi_sort_markers() - else: - particles.apply_kinetic_bc() - - # update u coefficients - diffs = self.update_feec_variables(u=u_new) - # clear the buffer - markers[:, first_init_idx:-2] = 0.0 + if stage == self._butcher.n_stages - 1: + self.particles[0].markers[ + ~self.particles[0].holes, + 11:-1, + ] = 0.0 + + # write new coeffs into Propagator.variables + (max_du,) = self.feec_vars_update(_u_new) - # update_weights - if self.variables.energetic_ions.species.weights_params.control_variate: - particles.update_weights() + # update_weights + if self.particles[0].control_variate: + self.particles[0].update_weights() - if self.options.dg_solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: - print("Maxdiff up for CurrentCoupling5DGradB:", diffs["u"]) - print() + if self._info and self._rank == 0: + print("Maxdiff up for CurrentCoupling5DGradB:", max_du) + print() diff --git a/src/struphy/propagators/propagators_fields.py b/src/struphy/propagators/propagators_fields.py index 462c58f26..848706ecd 100644 --- a/src/struphy/propagators/propagators_fields.py +++ b/src/struphy/propagators/propagators_fields.py @@ -1,13 +1,9 @@ "Only FEEC variables are updated." 
-import copy +from collections.abc import Callable from copy import deepcopy -from dataclasses import dataclass -from typing import Callable, Literal, get_args -import cunumpy as xp import scipy as sc -from line_profiler import profile from matplotlib import pyplot as plt from numpy import zeros from psydac.api.essential_bc import apply_essential_bc_stencil @@ -28,46 +24,32 @@ ) from struphy.feec.linear_operators import BoundaryOperator from struphy.feec.mass import WeightedMassOperator, WeightedMassOperators -from struphy.feec.preconditioner import MassMatrixDiagonalPreconditioner, MassMatrixPreconditioner +from struphy.feec.preconditioner import MassMatrixPreconditioner from struphy.feec.projectors import L2Projector from struphy.feec.psydac_derham import Derham, SplineFunction from struphy.feec.variational_utilities import ( BracketOperator, - Hdiv0_transport_operator, + H1vecMassMatrix_density, InternalEnergyEvaluator, KineticEnergyEvaluator, - Pressure_transport_operator, ) from struphy.fields_background.equils import set_defaults from struphy.geometry.utilities import TransformedPformComponent from struphy.initial import perturbations -from struphy.io.options import ( - OptsDirectSolver, - OptsGenSolver, - OptsMassPrecond, - OptsNonlinearSolver, - OptsSaddlePointSolver, - OptsSymmSolver, - OptsVecSpace, - check_option, -) from struphy.io.setup import descend_options_dict from struphy.kinetic_background.base import Maxwellian from struphy.kinetic_background.maxwellians import GyroMaxwellian2D, Maxwellian3D from struphy.linear_algebra.saddle_point import SaddlePointSolver -from struphy.linear_algebra.schur_solver import SchurSolver, SchurSolverFull -from struphy.linear_algebra.solver import NonlinearSolverParameters, SolverParameters -from struphy.models.species import Species -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable, Variable +from struphy.linear_algebra.schur_solver import SchurSolver from struphy.ode.solvers import ODEsolverFEEC -from struphy.ode.utils import ButcherTableau, OptsButcher +from struphy.ode.utils import ButcherTableau from struphy.pic.accumulation import accum_kernels, accum_kernels_gc -from struphy.pic.accumulation.filter import FilterParameters from struphy.pic.accumulation.particles_to_grid import Accumulator, AccumulatorVector from struphy.pic.base import Particles from struphy.pic.particles import Particles5D, Particles6D from struphy.polar.basic import PolarVector from struphy.propagators.base import Propagator +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -84,89 +66,52 @@ class Maxwell(Propagator): :ref:`time_discret`: Crank-Nicolson (implicit mid-point). System size reduction via :class:`~struphy.linear_algebra.schur_solver.SchurSolver`. 
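A minimal usage sketch (variable names are placeholders; the Derham sequence and mass operators are assumed to be provided by the calling model): >>> step = Maxwell(e, b, algo="implicit", solver=Maxwell.options(default=True)["solver"]) >>> step(dt) # one Crank-Nicolson update of e and b, in-place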
""" - class Variables: - def __init__(self): - self._e: FEECVariable = None - self._b: FEECVariable = None - - @property - def e(self) -> FEECVariable: - return self._e - - @e.setter - def e(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._e = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal["implicit", "explicit"] - # propagator options - algo: OptsAlgo = "implicit" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - butcher: ButcherTableau = None - - def __post_init__(self): - # checks - check_option(self.algo, self.OptsAlgo) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["implicit"] + ButcherTableau.available_methods() + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, [], verbose=False) + + return dct + + def __init__( + self, + e: BlockVector, + b: BlockVector, + *, + algo: dict = options(default=True)["algo"], + solver: dict = options(default=True)["solver"], + ): + super().__init__(e, b) + + self._algo = algo - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): # obtain needed matrices M1 = self.mass_ops.M1 M2 = self.mass_ops.M2 curl = self.derham.curl # Preconditioner for M1 + ... 
- if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) - pc = pc_class(M1) + pc_class = getattr(preconditioner, solver["type"][1]) + pc = pc_class(self.mass_ops.M1) - if self.options.algo == "implicit": - self._info = self.options.solver_params.info + if self._algo == "implicit": + self._info = solver["info"] # Define block matrix [[A B], [C I]] (without time step size dt in the diagonals) _A = M1 @@ -180,9 +125,11 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # pre-allocate arrays @@ -193,44 +140,43 @@ def allocate(self): # define vector field M1_inv = inverse( M1, - self.options.solver, + solver["type"][0], pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) weak_curl = M1_inv @ curl.T @ M2 # allocate output of vector field - out1 = self.variables.e.spline.vector.space.zeros() - out2 = self.variables.b.spline.vector.space.zeros() + out1 = e.space.zeros() + out2 = b.space.zeros() - def f1(t, y1, y2, out: BlockVector = out1): + def f1(t, y1, y2, out=out1): weak_curl.dot(y2, out=out) out.update_ghost_regions() return out - def f2(t, y1, y2, out: BlockVector = out2): + def f2(t, y1, y2, out=out2): curl.dot(y1, out=out) out *= -1.0 out.update_ghost_regions() return out - vector_field = {self.variables.e.spline.vector: f1, self.variables.b.spline.vector: f2} - self._ode_solver = ODEsolverFEEC(vector_field, butcher=self.options.butcher) + vector_field = {e: f1, b: f2} + self._ode_solver = ODEsolverFEEC(vector_field, algo=algo) # allocate place-holder vectors to avoid temporary array allocations in __call__ - self._e_tmp1 = self.variables.e.spline.vector.space.zeros() - self._e_tmp2 = self.variables.e.spline.vector.space.zeros() - self._b_tmp1 = self.variables.b.spline.vector.space.zeros() + self._e_tmp1 = e.space.zeros() + self._e_tmp2 = e.space.zeros() + self._b_tmp1 = b.space.zeros() - @profile def __call__(self, dt): - # current FE coeffs - en = self.variables.e.spline.vector - bn = self.variables.b.spline.vector + # current variables + en = self.feec_vars[0] + bn = self.feec_vars[1] - if self.options.algo == "implicit": + if self._algo == "implicit": # solve for new e coeffs self._B.dot(bn, out=self._byn) @@ -243,16 +189,17 @@ def __call__(self, dt): bn1 *= -dt bn1 += bn - diffs = self.update_feec_variables(e=en1, b=bn1) + # write new coeffs into self.feec_vars + max_de, max_db = self.feec_vars_update(en1, bn1) else: self._ode_solver(0.0, dt) if self._info and MPI.COMM_WORLD.Get_rank() == 0: - if self.options.algo == "implicit": + if self._algo == "implicit": print("Status for Maxwell:", info["success"]) print("Iterations for Maxwell:", info["niter"]) - print("Maxdiff e for Maxwell:", diffs["e"]) - print("Maxdiff b for Maxwell:", diffs["b"]) + print("Maxdiff e1 for Maxwell:", max_de) + print("Maxdiff b2 for Maxwell:", max_db) print() @@ -284,71 +231,39 @@ class OhmCold(Propagator): \end{bmatrix} \,. 
""" - class Variables: - def __init__(self): - self._j: FEECVariable = None - self._e: FEECVariable = None - - @property - def j(self) -> FEECVariable: - return self._j - - @j.setter - def j(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._j = new - - @property - def e(self) -> FEECVariable: - return self._e - - @e.setter - def e(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._e = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + + return dct - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._info = self.options.solver_params.info - - self._alpha = self.variables.j.species.equation_params.alpha - self._epsilon = self.variables.j.species.equation_params.epsilon + def __init__( + self, + j: BlockVector, + e: BlockVector, + *, + alpha: float = 1.0, + epsilon: float = 1.0, + solver: dict = options(default=True)["solver"], + ): + super().__init__(e, j) + + self._info = solver["info"] + self._alpha = alpha + self._epsilon = epsilon # Define block matrix [[A B], [C I]] (without time step size dt in the diagonals) _A = self.mass_ops.M1ninv @@ -356,10 +271,10 @@ def allocate(self): self._B = -1 / 2 * 1 / self._epsilon * self.mass_ops.M1 # no dt # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(self.mass_ops.M1ninv) # Instantiate Schur solver (constant in this case) @@ -368,14 +283,13 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) - j = self.variables.j.spline.vector - e = self.variables.e.spline.vector - self._tmp_j1 = j.space.zeros() self._tmp_j2 = j.space.zeros() self._tmp_e1 = e.space.zeros() @@ -383,8 +297,8 @@ def allocate(self): def __call__(self, dt): # current variables - jn = self.variables.j.spline.vector - en = self.variables.e.spline.vector + en = self.feec_vars[0] + jn = self.feec_vars[1] # in-place solution (no tmps created here) Ben = self._B.dot(en, out=self._tmp_e1) @@ -398,13 +312,13 @@ def __call__(self, dt): en1 += en # write new coeffs into Propagator.variables - diffs = self.update_feec_variables(e=en1, j=jn1) + 
max_de, max_dj = self.feec_vars_update(en1, jn1) if self._info: print("Status for OhmCold:", info["success"]) print("Iterations for OhmCold:", info["niter"]) - print("Maxdiff e1 for OhmCold:", diffs["e"]) - print("Maxdiff j1 for OhmCold:", diffs["j"]) + print("Maxdiff e1 for OhmCold:", max_de) + print("Maxdiff j1 for OhmCold:", max_dj) print() @@ -424,90 +338,65 @@ class JxBCold(Propagator): \mathbb M_{1/n_0} \left( \mathbf j^{n+1} - \mathbf j^n \right) = \frac{\Delta t}{2} \frac{1}{\varepsilon} \mathbb M_{B_0/n_0} \left( \mathbf j^{n+1} - \mathbf j^n \right)\,. """ - class Variables: - def __init__(self): - self._j: FEECVariable = None - - @property - def j(self) -> FEECVariable: - return self._j - - @j.setter - def j(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._j = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) - def __post_init__(self): - # checks - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) + return dct - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + def __init__( + self, + j: BlockVector, + *, + epsilon: float = 1.0, + solver: dict = options(default=True)["solver"], + ): + super().__init__(j) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._info = self.options.solver_params.info - - epsilon = self.variables.j.species.equation_params.epsilon + self._info = solver["info"] # mass matrix in system (M - dt/2 * A)*j^(n + 1) = (M + dt/2 * A)*j^n self._M = self.mass_ops.M1ninv self._A = -1 / epsilon * self.mass_ops.M1Bninv # no dt # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(self.mass_ops.M1ninv) # Instantiate linear solver self._solver = inverse( self._M, - self.options.solver, + solver["type"][0], pc=pc, - x0=self.variables.j.spline.vector, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + x0=self.feec_vars[0], + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # allocate dummy vectors to avoid temporary array allocations self._rhs_j = self._M.codomain.zeros() - self._j_new = self.variables.j.spline.vector.space.zeros() + self._j_new = j.space.zeros() - @profile def __call__(self, dt): # current variables - jn = self.variables.j.spline.vector + jn = self.feec_vars[0] # define system (M - dt/2 * A)*b^(n + 1) = (M + dt/2 * A)*b^n lhs = self._M - dt / 2.0 * self._A @@ -522,7 +411,7 @@ def 
__call__(self, dt): info = self._solver._info # write new coeffs into Propagator.variables - max_dj = self.update_feec_variables(j=jn1) + max_dj = self.feec_vars_update(jn1)[0] if self._info: print("Status for FluidCold:", info["success"]) @@ -555,78 +444,42 @@ class ShearAlfven(Propagator): the MHD equilibirum density. The solution of the above system is based on the :ref:`Schur complement `. """ - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal["implicit", "explicit"] - # propagator options - u_space: OptsVecSpace = "Hdiv" - algo: OptsAlgo = "implicit" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - butcher: ButcherTableau = None - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.algo, self.OptsAlgo) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["implicit", "rk4", "forward_euler", "heun2", "rk2", "heun3"] + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - u_space = self.options.u_space + return dct + + def __init__( + self, + u: BlockVector, + b: BlockVector, + *, + u_space: str, + algo: dict = options(default=True)["algo"], + solver: dict = options(default=True)["solver"], + ): + super().__init__(u, b) + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} + + self._algo = algo # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) id_M = "M" + self.derham.space_to_form[u_space] + "n" @@ -639,14 +492,14 @@ def allocate(self): curl = self.derham.curl # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, id_M)) - if self.options.algo == "implicit": - self._info = self.options.solver_params.info + if self._algo == "implicit": + self._info = solver["info"] self._B = -1 / 2 * _T.T @ curl.T @ _M2 self._C = 1 / 2 * curl @ _T @@ -657,9 
+510,12 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) # pre-allocate arrays @@ -671,45 +527,44 @@ def allocate(self): # define vector field A_inv = inverse( _A, - self.options.solver, + solver["type"][0], pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) _f1 = A_inv @ _T.T @ curl.T @ _M2 _f2 = curl @ _T # allocate output of vector field - out1 = self.variables.u.spline.vector.space.zeros() - out2 = self.variables.b.spline.vector.space.zeros() + out1 = u.space.zeros() + out2 = b.space.zeros() - def f1(t, y1, y2, out: BlockVector = out1): + def f1(t, y1, y2, out=out1): _f1.dot(y2, out=out) out.update_ghost_regions() return out - def f2(t, y1, y2, out: BlockVector = out2): + def f2(t, y1, y2, out=out2): _f2.dot(y1, out=out) out *= -1.0 out.update_ghost_regions() return out - vector_field = {self.variables.u.spline.vector: f1, self.variables.b.spline.vector: f2} - self._ode_solver = ODEsolverFEEC(vector_field, butcher=self.options.butcher) + vector_field = {u: f1, b: f2} + self._ode_solver = ODEsolverFEEC(vector_field, algo=algo) # allocate dummy vectors to avoid temporary array allocations - self._u_tmp1 = self.variables.u.spline.vector.space.zeros() - self._u_tmp2 = self.variables.u.spline.vector.space.zeros() - self._b_tmp1 = self.variables.b.spline.vector.space.zeros() + self._u_tmp1 = u.space.zeros() + self._u_tmp2 = u.space.zeros() + self._b_tmp1 = b.space.zeros() - @profile def __call__(self, dt): - # current FE coeffs - un = self.variables.u.spline.vector - bn = self.variables.b.spline.vector + # current variables + un = self.feec_vars[0] + bn = self.feec_vars[1] - if self.options.algo == "implicit": + if self._algo == "implicit": # solve for new u coeffs byn = self._B.dot(bn, out=self._byn) @@ -722,16 +577,18 @@ def __call__(self, dt): bn1 *= -dt bn1 += bn - diffs = self.update_feec_variables(u=un1, b=bn1) + # write new coeffs into self.feec_vars + max_du, max_db = self.feec_vars_update(un1, bn1) + else: self._ode_solver(0.0, dt) if self._info and MPI.COMM_WORLD.Get_rank() == 0: - if self.options.algo == "implicit": + if self._algo == "implicit": print("Status for ShearAlfven:", info["success"]) print("Iterations for ShearAlfven:", info["niter"]) - print("Maxdiff up for ShearAlfven:", diffs["u"]) - print("Maxdiff b2 for ShearAlfven:", diffs["b"]) + print("Maxdiff up for ShearAlfven:", max_du) + print("Maxdiff b2 for ShearAlfven:", max_db) print() @@ -758,91 +615,62 @@ class ShearAlfvenB1(Propagator): the MHD equilibirum density. 
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hdiv") - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - solver_M1: OptsSymmSolver = "pcg" - precond_M1: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params_M1: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - check_option(self.solver_M1, OptsSymmSolver) - check_option(self.precond_M1, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.solver_params_M1 is None: - self.solver_params_M1 = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["solver_M1"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + + return dct - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._info = self.options.solver_params.info + def __init__( + self, + u: BlockVector, + b: BlockVector, + *, + solver: dict = options(default=True)["solver"], + solver_M1: dict = options(default=True)["solver_M1"], + ): + super().__init__(u, b) + + self._info = solver["info"] # define inverse of M1 - if self.options.precond_M1 is None: + if solver_M1["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond_M1) + pc_class = getattr(preconditioner, solver_M1["type"][1]) pc = pc_class(self.mass_ops.M1) M1_inv = inverse( self.mass_ops.M1, - self.options.solver_M1, + solver_M1["type"][0], pc=pc, - tol=self.options.solver_params_M1.tol, - maxiter=self.options.solver_params_M1.maxiter, - verbose=self.options.solver_params_M1.verbose, + tol=solver_M1["tol"], + maxiter=solver_M1["maxiter"], + verbose=solver_M1["verbose"], ) # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) @@ -852,10 +680,10 @@ def allocate(self): self._C = 1 / 2 * M1_inv @ self.derham.curl.T @ self.mass_ops.M2B # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, "M2n")) # instantiate Schur solver (constant 
in this case) @@ -864,26 +692,24 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # allocate dummy vectors to avoid temporary array allocations - u = self.variables.u.spline.vector - b = self.variables.b.spline.vector - self._u_tmp1 = u.space.zeros() self._u_tmp2 = u.space.zeros() self._b_tmp1 = b.space.zeros() self._byn = self._B.codomain.zeros() - @profile def __call__(self, dt): # current variables - un = self.variables.u.spline.vector - bn = self.variables.b.spline.vector + un = self.feec_vars[0] + bn = self.feec_vars[1] # solve for new u coeffs byn = self._B.dot(bn, out=self._byn) @@ -898,13 +724,13 @@ def __call__(self, dt): bn1 += bn # write new coeffs into self.feec_vars - max_diffs = self.update_feec_variables(u=un1, b=bn1) + max_du, max_db = self.feec_vars_update(un1, bn1) if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for ShearAlfvenB1:", info["success"]) print("Iterations for ShearAlfvenB1:", info["niter"]) - print("Maxdiff up for ShearAlfvenB1:", max_diffs["u"]) - print("Maxdiff b2 for ShearAlfvenB1:", max_diffs["b"]) + print("Maxdiff up for ShearAlfvenB1:", max_du) + print("Maxdiff b2 for ShearAlfvenB1:", max_db) print() @@ -928,66 +754,38 @@ class Hall(Propagator): The solution of the above system is based on the Pre-conditioned Biconjugate Gradient Stabilized algortihm (PBiConjugateGradientStab). """ - class Variables: - def __init__(self): - self._b: FEECVariable = None - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hcurl" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - solver: OptsGenSolver = "pbicgstab" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - epsilon_from: Species = None - - def __post_init__(self): - # checks - check_option(self.solver, OptsGenSolver) - check_option(self.precond, OptsMassPrecond) + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + return dct - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.epsilon_from is None: - epsilon = 1.0 - else: - epsilon = self.options.epsilon_from.equation_params.epsilon + def __init__( + self, + b: BlockVector, + *, + epsilon: float = 1.0, + solver: dict = options(default=True)["solver"], + ): + super().__init__(b) - self._info = self.options.solver_params.info - self._tol = self.options.solver_params.tol - self._maxiter = self.options.solver_params.maxiter - self._verbose = self.options.solver_params.verbose + 
self._info = solver["info"] + self._tol = solver["tol"] + self._maxiter = solver["maxiter"] + self._verbose = solver["verbose"] # mass matrix in system (M - dt/2 * A)*b^(n + 1) = (M + dt/2 * A)*b^n id_M = "M1" @@ -997,18 +795,18 @@ def allocate(self): self._A = 1.0 / epsilon * self.derham.curl.T @ self._M2Bn @ self.derham.curl # Preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, id_M)) # Instantiate linear solver self._solver = inverse( self._M, - self.options.solver, + solver["type"][0], pc=pc, - x0=self.variables.b.spline.vector, + x0=self.feec_vars[0], tol=self._tol, maxiter=self._maxiter, verbose=self._verbose, @@ -1016,11 +814,11 @@ def allocate(self): # allocate dummy vectors to avoid temporary array allocations self._rhs_b = self._M.codomain.zeros() - self._b_new = self.variables.b.spline.vector.space.zeros() + self._b_new = b.space.zeros() def __call__(self, dt): # current variables - bn = self.variables.b.spline.vector + bn = self.feec_vars[0] # define system (M - dt/2 * A)*b^(n + 1) = (M + dt/2 * A)*b^n lhs = self._M - dt / 2.0 * self._A @@ -1034,12 +832,12 @@ def __call__(self, dt): info = self._solver._info # write new coeffs into self.feec_vars - max_db = self.update_feec_variables(b=bn1) + max_db = self.feec_vars_update(bn1) if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for Hall:", info["success"]) print("Iterations for Hall:", info["niter"]) - print("Maxdiff b1 for Hall:", max_db["b"]) + print("Maxdiff b1 for Hall:", max_db) print() @@ -1077,85 +875,42 @@ class Magnetosonic(Propagator): \boldsymbol{\rho}^{n+1} = \boldsymbol{\rho}^n - \frac{\Delta t}{2} \mathbb D \mathcal Q^\alpha (\mathbf u^{n+1} + \mathbf u^n) \,. 
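A minimal usage sketch (placeholder names; the Derham sequence, basis and mass operators are assumed to be provided by the calling model): >>> step = Magnetosonic(n, u, p, u_space="Hdiv", b=b2, solver=Magnetosonic.options(default=True)["solver"]) >>> step(dt) # advances n, u and p by one time step, in-place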
""" - class Variables: - def __init__(self): - self._n: FEECVariable = None - self._u: FEECVariable = None - self._p: FEECVariable = None - - @property - def n(self) -> FEECVariable: - return self._n - - @n.setter - def n(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._n = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def p(self) -> FEECVariable: - return self._p - - @p.setter - def p(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._p = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - b_field: FEECVariable = None - u_space: OptsVecSpace = "Hdiv" - solver: OptsGenSolver = "pbicgstab" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsGenSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.b_field is None: - self.b_field = FEECVariable(space="Hdiv") - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - u_space = self.options.u_space - - self._info = self.options.solver_params.info + return dct + + def __init__( + self, + n: StencilVector, + u: BlockVector, + p: StencilVector, + *, + u_space: str, + b: BlockVector, + solver: dict = options(default=True)["solver"], + ): + super().__init__(n, u, p) + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} + + self._info = solver["info"] self._bc = self.derham.dirichlet_bc # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) @@ -1174,8 +929,7 @@ def allocate(self): _K = getattr(self.basis_ops, id_K) if id_U is None: - _U = IdentityOperator(self.variables.u.spline.vector.space) - _UT = IdentityOperator(self.variables.u.spline.vector.space) + _U, _UT = IdentityOperator(u.space), IdentityOperator(u.space) else: _U = getattr(self.basis_ops, id_U) _UT = _U.T @@ -1186,14 +940,13 @@ def allocate(self): self._MJ = getattr(self.mass_ops, id_MJ) self._DQ = self.derham.div @ getattr(self.basis_ops, id_Q) - self.options.b_field.allocate(self.derham, self.domain) - self._b = self.options.b_field.spline.vector + self._b = b # preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, id_Mn)) # instantiate Schur solver (constant in this case) @@ -1202,27 +955,29 @@ def allocate(self): 
self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) # allocate dummy vectors to avoid temporary array allocations - self._u_tmp1 = self.variables.u.spline.vector.space.zeros() - self._u_tmp2 = self.variables.u.spline.vector.space.zeros() - self._p_tmp1 = self.variables.p.spline.vector.space.zeros() - self._n_tmp1 = self.variables.n.spline.vector.space.zeros() + self._u_tmp1 = u.space.zeros() + self._u_tmp2 = u.space.zeros() + self._p_tmp1 = p.space.zeros() + self._n_tmp1 = n.space.zeros() self._b_tmp1 = self._b.space.zeros() self._byn1 = self._B.codomain.zeros() self._byn2 = self._B.codomain.zeros() - @profile def __call__(self, dt): - # current FE coeffs - nn = self.variables.n.spline.vector - un = self.variables.u.spline.vector - pn = self.variables.p.spline.vector + # current variables + nn = self.feec_vars[0] + un = self.feec_vars[1] + pn = self.feec_vars[2] # solve for new u coeffs (no tmps created here) byn1 = self._B.dot(pn, out=self._byn1) @@ -1243,14 +998,19 @@ def __call__(self, dt): nn1 *= -dt / 2 nn1 += nn - diffs = self.update_feec_variables(n=nn1, u=un1, p=pn1) + # write new coeffs into self.feec_vars + max_dn, max_du, max_dp = self.feec_vars_update( + nn1, + un1, + pn1, + ) if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for Magnetosonic:", info["success"]) print("Iterations for Magnetosonic:", info["niter"]) - print("Maxdiff n3 for Magnetosonic:", diffs["n"]) - print("Maxdiff up for Magnetosonic:", diffs["u"]) - print("Maxdiff p3 for Magnetosonic:", diffs["p"]) + print("Maxdiff n3 for Magnetosonic:", max_dn) + print("Maxdiff up for Magnetosonic:", max_du) + print("Maxdiff p3 for Magnetosonic:", max_dp) print() @@ -1305,78 +1065,36 @@ class MagnetosonicUniform(Propagator): Solver- and/or other parameters for this splitting step. 
""" - class Variables: - def __init__(self): - self._n: FEECVariable = None - self._u: FEECVariable = None - self._p: FEECVariable = None - - @property - def n(self) -> FEECVariable: - return self._n - - @n.setter - def n(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._n = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def p(self) -> FEECVariable: - return self._p - - @p.setter - def p(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._p = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - solver: OptsGenSolver = "pbicgstab" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.solver, OptsGenSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._info = self.options.solver_params.info + return dct + + def __init__( + self, + n: StencilVector, + u: BlockVector, + p: StencilVector, + *, + solver: dict = options(default=True)["solver"], + ): + super().__init__(n, u, p) + + self._info = solver["info"] self._bc = self.derham.dirichlet_bc # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) @@ -1392,10 +1110,10 @@ def allocate(self): self._QD = getattr(self.basis_ops, id_Q) @ self.derham.div # preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, id_Mn)) # instantiate Schur solver (constant in this case) @@ -1404,16 +1122,14 @@ def allocate(self): self._schur_solver = SchurSolver( _A, _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], ) # allocate dummy vectors to avoid temporary array allocations - n = self.variables.n.spline.vector - u = self.variables.u.spline.vector - p = self.variables.p.spline.vector - self._u_tmp1 = u.space.zeros() self._u_tmp2 = u.space.zeros() self._p_tmp1 = p.space.zeros() @@ -1421,12 +1137,11 @@ def allocate(self): self._byn1 = self._B.codomain.zeros() - @profile def __call__(self, dt): # current variables - nn = self.variables.n.spline.vector - un = self.variables.u.spline.vector - pn = self.variables.p.spline.vector + nn = self.feec_vars[0] + un = self.feec_vars[1] + pn = 
self.feec_vars[2] # solve for new u coeffs byn1 = self._B.dot(pn, out=self._byn1) @@ -1445,14 +1160,18 @@ def __call__(self, dt): nn1 += nn # write new coeffs into self.feec_vars - diffs = self.update_feec_variables(n=nn1, u=un1, p=pn1) + max_dn, max_du, max_dp = self.feec_vars_update( + nn1, + un1, + pn1, + ) if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for Magnetosonic:", info["success"]) print("Iterations for Magnetosonic:", info["niter"]) - print("Maxdiff n3 for Magnetosonic:", diffs["n"]) - print("Maxdiff up for Magnetosonic:", diffs["u"]) - print("Maxdiff p3 for Magnetosonic:", diffs["p"]) + print("Maxdiff n3 for Magnetosonic:", max_dn) + print("Maxdiff up for Magnetosonic:", max_du) + print("Maxdiff p3 for Magnetosonic:", max_dp) print() @@ -1540,13 +1259,13 @@ def __init__(self, a, **params): ] # Initialize Accumulator object for getting density from particles - self._pts_x = 1.0 / (2.0 * self.derham.Nel[0]) * xp.polynomial.legendre.leggauss( + self._pts_x = 1.0 / (2.0 * self.derham.Nel[0]) * np.polynomial.legendre.leggauss( self._nqs[0], )[0] + 1.0 / (2.0 * self.derham.Nel[0]) - self._pts_y = 1.0 / (2.0 * self.derham.Nel[1]) * xp.polynomial.legendre.leggauss( + self._pts_y = 1.0 / (2.0 * self.derham.Nel[1]) * np.polynomial.legendre.leggauss( self._nqs[1], )[0] + 1.0 / (2.0 * self.derham.Nel[1]) - self._pts_z = 1.0 / (2.0 * self.derham.Nel[2]) * xp.polynomial.legendre.leggauss( + self._pts_z = 1.0 / (2.0 * self.derham.Nel[2]) * np.polynomial.legendre.leggauss( self._nqs[2], )[0] + 1.0 / (2.0 * self.derham.Nel[2]) @@ -1586,15 +1305,15 @@ def __call__(self, dt): self._accum_density.accumulate( self._particles, - xp.array(self.derham.Nel), - xp.array(self._nqs), - xp.array( + np.array(self.derham.Nel), + np.array(self._nqs), + np.array( self._pts_x, ), - xp.array(self._pts_y), - xp.array(self._pts_z), - xp.array(self._p_shape), - xp.array(self._p_size), + np.array(self._pts_y), + np.array(self._pts_z), + np.array(self._p_shape), + np.array(self._p_size), ) self._accum_potential.accumulate(self._particles) @@ -1687,70 +1406,65 @@ class CurrentCoupling6DDensity(Propagator): :ref:`time_discret`: Crank-Nicolson (implicit mid-point). 
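The accumulated term is scaled by the coupling constant :math:`A_\textnormal{h} / (A_b \, \varepsilon)` (``Ah / Ab / epsilon`` in the code below).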
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - energetic_ions: PICVariable = None - b_tilde: FEECVariable = None - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - boundary_cut: tuple = (0.0, 0.0, 0.0) - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert self.energetic_ions.space == "Particles6D" - assert self.b_tilde.space == "Hdiv" - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + u: BlockVector, + *, + particles: Particles6D, + u_space: str, + b_eq: BlockVector | PolarVector, + b_tilde: BlockVector | PolarVector, + Ab: int = 1, + Ah: int = 1, + epsilon: float = 1.0, + solver: dict = options(default=True)["solver"], + filter: dict = options(default=True)["filter"], + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(u) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._space_key_int = int(self.derham.space_to_form[self.options.u_space]) - - particles = self.options.energetic_ions.particles - u = self.variables.u.spline.vector - self._b_eq = self.projected_equil.b2 - self._b_tilde = self.options.b_tilde.spline.vector + # assert parameters and expose some quantities to self + if u_space == "H1vec": + self._space_key_int = 0 + else: + self._space_key_int = int( + self.derham.space_to_form[u_space], + ) + + self._particles = particles + self._b_eq = b_eq + self._b_tilde = b_tilde # if self._particles.control_variate: @@ -1766,70 +1480,65 @@ def allocate(self): # self._particles.f0.n, *quad_pts, kind='3', squeeze_out=False) # # memory allocation of magnetic field at quadrature points - # self._b_quad1 = xp.zeros_like(self._nh0_at_quad) - # self._b_quad2 = xp.zeros_like(self._nh0_at_quad) - # self._b_quad3 = xp.zeros_like(self._nh0_at_quad) + # self._b_quad1 = np.zeros_like(self._nh0_at_quad) + # self._b_quad2 = np.zeros_like(self._nh0_at_quad) + # self._b_quad3 = np.zeros_like(self._nh0_at_quad) # # memory allocation for self._b_quad x self._nh0_at_quad * self._coupling_const - # 
self._mat12 = xp.zeros_like(self._nh0_at_quad) - # self._mat13 = xp.zeros_like(self._nh0_at_quad) - # self._mat23 = xp.zeros_like(self._nh0_at_quad) - - # self._mat21 = xp.zeros_like(self._nh0_at_quad) - # self._mat31 = xp.zeros_like(self._nh0_at_quad) - # self._mat32 = xp.zeros_like(self._nh0_at_quad) + # self._mat12 = np.zeros_like(self._nh0_at_quad) + # self._mat13 = np.zeros_like(self._nh0_at_quad) + # self._mat23 = np.zeros_like(self._nh0_at_quad) - self._type = self.options.solver - self._tol = self.options.solver_params.tol - self._maxiter = self.options.solver_params.maxiter - self._info = self.options.solver_params.info - self._verbose = self.options.solver_params.verbose - self._recycle = self.options.solver_params.recycle + # self._mat21 = np.zeros_like(self._nh0_at_quad) + # self._mat31 = np.zeros_like(self._nh0_at_quad) + # self._mat32 = np.zeros_like(self._nh0_at_quad) - Ah = self.options.energetic_ions.species.mass_number - Ab = self.variables.u.species.mass_number - epsilon = self.options.energetic_ions.species.equation_params.epsilon + self._type = solver["type"][0] + self._tol = solver["tol"] + self._maxiter = solver["maxiter"] + self._info = solver["info"] + self._verbose = solver["verbose"] self._coupling_const = Ah / Ab / epsilon - self._boundary_cut_e1 = self.options.boundary_cut[0] + self._boundary_cut_e1 = boundary_cut["e1"] # load accumulator self._accumulator = Accumulator( particles, - self.options.u_space, + u_space, Pyccelkernel(accum_kernels.cc_lin_mhd_6d_1), self.mass_ops, self.domain.args_domain, add_vector=False, symmetry="asym", - filter_params=self.options.filter_params, + filter_params=filter, ) # transposed extraction operator PolarVector --> BlockVector (identity map in case of no polar splines) self._E2T = self.derham.extraction_ops["2"].transpose() # mass matrix in system (M - dt/2 * A)*u^(n + 1) = (M + dt/2 * A)*u^n - u_id = self.derham.space_to_form[self.options.u_space] + u_id = self.derham.space_to_form[u_space] self._M = getattr(self.mass_ops, "M" + u_id + "n") # preconditioner - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(self._M) # linear solver self._solver = inverse( self._M, - self.options.solver, + solver["type"][0], pc=pc, - x0=self.variables.u.spline.vector, + x0=self.feec_vars[0], tol=self._tol, maxiter=self._maxiter, verbose=self._verbose, - recycle=self._recycle, + recycle=solver["recycle"], ) # temporary vectors to avoid memory allocation @@ -1841,7 +1550,7 @@ def allocate(self): def __call__(self, dt): # pointer to old coefficients - un = self.variables.u.spline.vector + un = self.feec_vars[0] # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) self._b_eq.copy(out=self._b_full1) @@ -1904,7 +1613,7 @@ def __call__(self, dt): info = self._solver._info # write new coeffs into Propagator.variables - max_du = self.update_feec_variables(u=un1) + max_du = self.feec_vars_update(un1) if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for CurrentCoupling6DDensity:", info["success"]) @@ -1921,9 +1630,9 @@ class ShearAlfvenCurrentCoupling5D(Propagator): \left\{ \begin{aligned} - \int \rho_0 &\frac{\partial \tilde{\mathbf U}}{\partial t} \cdot \mathbf V \, \textnormal{d} \mathbf{x} = \int \left(\tilde{\mathbf B} - \frac{A_\textnormal{h}}{A_b} \iint f^\text{vol} \mu \mathbf{b}_0\textnormal{d} \mu \textnormal{d} v_\parallel \right) \cdot \nabla \times 
(\tilde{\mathbf B} \times \mathbf V) \, \textnormal{d} \mathbf{x} \quad \forall \, \mathbf V \in \{H(\textnormal{curl}), H(\textnormal{div}), (H^1)^3\}\,, \,, + \int \rho_0 &\frac{\partial \tilde{\mathbf U}}{\partial t} \cdot \mathbf V \, \textnormal{d} \mathbf{x} = \int \left(\tilde{\mathbf B} - \frac{A_\textnormal{h}}{A_b} \iint f^\text{vol} \mu \mathbf{b}_0\textnormal{d} \mu \textnormal{d} v_\parallel \right) \cdot \nabla \times (\mathbf B_0 \times \mathbf V) \, \textnormal{d} \mathbf{x} \quad \forall \, \mathbf V \in \{H(\textnormal{curl}), H(\textnormal{div}), (H^1)^3\}\,, \,, \\ - &\frac{\partial \tilde{\mathbf B}}{\partial t} = - \nabla \times (\tilde{\mathbf B} \times \tilde{\mathbf U}) \,. + &\frac{\partial \tilde{\mathbf B}}{\partial t} = - \nabla \times (\mathbf B_0 \times \tilde{\mathbf U}) \,. \end{aligned} \right. @@ -1936,242 +1645,499 @@ class ShearAlfvenCurrentCoupling5D(Propagator): \end{bmatrix} = \frac{\Delta t}{2} \,. \begin{bmatrix} - 0 & (\mathbb M^{2,n})^{-1} \mathcal {T^2}^\top \mathbb C^\top \\ - \mathbb C \mathcal {T^2} (\mathbb M^{2,n})^{-1} & 0 + 0 & (\mathbb M^{\alpha,n})^{-1} \mathcal {T^\alpha}^\top \mathbb C^\top \\ - \mathbb C \mathcal {T^\alpha} (\mathbb M^{\alpha,n})^{-1} & 0 \end{bmatrix} \begin{bmatrix} - {\mathbb M^{2,n}}(\mathbf u^{n+1} + \mathbf u^n) \\ \mathbb M_2(\mathbf b^{n+1} + \mathbf b^n) + \sum_k^{N_p} \omega_k \mu_k \hat{\mathbf b}¹_0 (\boldsymbol \eta_k) \cdot \left(\frac{1}{\sqrt{g(\boldsymbol \eta_k)}} \vec \Lambda² (\boldsymbol \eta_k) \right) + {\mathbb M^{\alpha,n}}(\mathbf u^{n+1} + \mathbf u^n) \\ \mathbb M_2(\mathbf b^{n+1} + \mathbf b^n) + \sum_k^{N_p} \omega_k \mu_k \hat{\mathbf b}¹_0 (\boldsymbol \eta_k) \cdot \left(\frac{1}{\sqrt{g(\boldsymbol \eta_k)}} \vec \Lambda² (\boldsymbol \eta_k) \right) \end{bmatrix} \,, where - :math:`\mathcal{T}^2 = \hat \Pi \left[\frac{\tilde{\mathbf B}^2}{\sqrt{g} \times \vec \Lambda^2\right]` and - :math:`\mathbb M^{2,n}` is a :class:`~struphy.feec.mass.WeightedMassOperators` being weighted with :math:`\rho_\text{eq}`, the MHD equilibirum density. + :math:`\mathcal{T}^\alpha` is a :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperators` and + :math:`\mathbb M^{\alpha,n}` is a :class:`~struphy.feec.mass.WeightedMassOperators` being weighted with :math:`\rho_\text{eq}`, the MHD equilibirum density. + :math:`\alpha \in \{1, 2, v\}` denotes the :math:`\alpha`-form space where the operators correspond to. + Moreover, :math:`\sum_k^{N_p} \omega_k \mu_k \hat{\mathbf b}¹_0 (\boldsymbol \eta_k) \cdot \left(\frac{1}{\sqrt{g(\boldsymbol \eta_k)}} \vec \Lambda² (\boldsymbol \eta_k)\right)` is accumulated by the kernel :class:`~struphy.pic.accumulation.accum_kernels_gc.cc_lin_mhd_5d_M`. 
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal["implicit", "explicit"] - # propagator options - energetic_ions: PICVariable = None - ep_scale: float = 1.0 - u_space: OptsVecSpace = "Hdiv" - algo: OptsAlgo = "implicit" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixDiagonalPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - butcher: ButcherTableau = None - nonlinear: bool = True - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.algo, self.OptsAlgo) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert isinstance(self.energetic_ions, PICVariable) - assert self.energetic_ions.space == "Particles5D" - assert isinstance(self.ep_scale, float) - assert isinstance(self.nonlinear, bool) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.filter_params is None: - self.filter_params = FilterParameters() - - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._u_form = self.derham.space_to_form[self.options.u_space] - - # call operatros - id_M = "M" + self._u_form + "n" - id_T = "T" + self._u_form + if default: + dct = descend_options_dict(dct, []) - _A = getattr(self.mass_ops, id_M) - _T = getattr(self.basis_ops, id_T) - M2 = self.mass_ops.M2 - curl = self.derham.curl - PB = getattr(self.basis_ops, "PB") + return dct - # define Accumulator and arguments - self._ACC = AccumulatorVector( - self.options.energetic_ions.particles, - "H1", - Pyccelkernel(accum_kernels_gc.gc_mag_density_0form), - self.mass_ops, - self.domain.args_domain, - filter_params=self.options.filter_params, - ) + def __init__( + self, + u: BlockVector, + b: BlockVector, + *, + particles: Particles5D, + absB0: StencilVector, + unit_b1: BlockVector, + u_space: str, + solver: dict = options(default=True)["solver"], + filter: dict = options(default=True)["filter"], + coupling_params: dict, + accumulated_magnetization: BlockVector, + boundary_cut: 
dict = options(default=True)["boundary_cut"], + ): + super().__init__(u, b) - # Preconditioner - if self.options.precond is None: - pc = None - else: - pc_class = getattr(preconditioner, self.options.precond) + self._particles = particles + self._unit_b1 = unit_b1 + self._absB0 = absB0 + + self._info = solver["info"] + + self._scale_vec = coupling_params["Ah"] / coupling_params["Ab"] + + self._E1T = self.derham.extraction_ops["1"].transpose() + self._unit_b1 = self._E1T.dot(self._unit_b1) + + self._accumulated_magnetization = accumulated_magnetization + + self._boundary_cut_e1 = boundary_cut["e1"] + + self._ACC = Accumulator( + particles, + u_space, + Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_M), + self.mass_ops, + self.domain.args_domain, + add_vector=True, + symmetry="symm", + filter_params=filter, + ) + + # if self._particles.control_variate: + + # # control variate method is only valid with Maxwellian distributions with "zero perp mean velocity". + # assert isinstance(self._particles.f0, Maxwellian) + + # self._ACC.init_control_variate(self.mass_ops) + + # # evaluate and save f0.n at quadrature points + # quad_pts = [quad_grid[nquad].points.flatten() + # for quad_grid, nquad in zip(self.derham.get_quad_grids(self.derham.Vh_fem['0']), self.derham.nquads)] + + # n0_at_quad = self.domain.push( + # self._particles.f0.n, *quad_pts, kind='0', squeeze_out=False) + + # # evaluate M0 = unit_b1 (1form) / absB0 (0form) * 2 * vth_perp² at quadrature points + # quad_pts_array = self.domain.prepare_eval_pts(*quad_pts)[:3] + + # vth_perp = self.particles.f0.vth(*quad_pts_array)[1] + + # absB0_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['0'], self._absB0) + + # unit_b1_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['1'], self._unit_b1) + + # self._M0_at_quad = unit_b1_at_quad / absB0_at_quad * vth_perp**2 * n0_at_quad * self._scale_vec + + # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) + id_M = "M" + self.derham.space_to_form[u_space] + "n" + id_T = "T" + self.derham.space_to_form[u_space] + + _A = getattr(self.mass_ops, id_M) + _T = getattr(self.basis_ops, id_T) + + self._B = -1 / 2 * _T.T @ self.derham.curl.T @ self.mass_ops.M2 + self._C = 1 / 2 * self.derham.curl @ _T + self._B2 = -1 / 2 * _T.T @ self.derham.curl.T + + # Preconditioner + if solver["type"][1] is None: + pc = None + else: + pc_class = getattr(preconditioner, solver["type"][1]) pc = pc_class(getattr(self.mass_ops, id_M)) - if self.options.nonlinear: - # initialize operator TB - self._initialize_projection_operator_TB() + # Instantiate Schur solver (constant in this case) + _BC = self._B @ self._C + + self._schur_solver = SchurSolver( + _A, + _BC, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], + ) + + # allocate dummy vectors to avoid temporary array allocations + self._u_tmp1 = u.space.zeros() + self._u_tmp2 = u.space.zeros() + self._b_tmp1 = b.space.zeros() + + self._byn = self._B.codomain.zeros() + self._tmp_acc = self._B2.codomain.zeros() + + def __call__(self, dt): + # current variables + un = self.feec_vars[0] + bn = self.feec_vars[1] + + # perform accumulation (either with or without control variate) + # if self._particles.control_variate: + + # self._ACC.accumulate(self._particles, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._scale_vec, 0., + # control_vec=[self._M0_at_quad[0], self._M0_at_quad[1], self._M0_at_quad[2]]) + # else: + # 
self._ACC.accumulate(self._particles, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._scale_vec, 0.) - _T = _T + self._TB - _TT = _T.T + self._TBT + self._ACC( + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._scale_vec, + self._boundary_cut_e1, + ) + + self._ACC.vectors[0].copy(out=self._accumulated_magnetization) + + # solve for new u coeffs (no tmps created here) + byn = self._B.dot(bn, out=self._byn) + b2acc = self._B2.dot(self._ACC.vectors[0], out=self._tmp_acc) + byn += b2acc + + # b2acc.copy(out=self._accumulated_magnetization) + + un1, info = self._schur_solver(un, byn, dt, out=self._u_tmp1) + + # new b coeffs (no tmps created here) + _u = un.copy(out=self._u_tmp2) + _u += un1 + bn1 = self._C.dot(_u, out=self._b_tmp1) + bn1 *= -dt + bn1 += bn + + # write new coeffs into self.feec_vars + max_du, max_db = self.feec_vars_update(un1, bn1) + + if self._info and MPI.COMM_WORLD.Get_rank() == 0: + print("Status for ShearAlfven:", info["success"]) + print("Iterations for ShearAlfven:", info["niter"]) + print("Maxdiff up for ShearAlfven:", max_du) + print("Maxdiff b2 for ShearAlfven:", max_db) + print() + + +class MagnetosonicCurrentCoupling5D(Propagator): + r""" + :ref:`FEEC ` discretization of the following equations: + find :math:`\tilde \rho \in L^2, \tilde{\mathbf U} \in \{H(\textnormal{curl}), H(\textnormal{div}), (H^1)^3\}, \tilde p \in L^2` such that + + .. math:: + + \left\{ + \begin{aligned} + &\frac{\partial \tilde{\rho}}{\partial t} = - \nabla \cdot (\rho_0 \tilde{\mathbf U}) \,, + \\ + \int \rho_0 &\frac{\partial \tilde{\mathbf U}}{\partial t} \cdot \mathbf V \, \textnormal{d} \mathbf{x} = \int (\nabla \times \mathbf B_0) \times \tilde{\mathbf B} \cdot \mathbf V \, \textnormal{d} \mathbf x + \frac{A_\textnormal{h}}{A_b}\iint f^\text{vol} \mu \mathbf b_0 \cdot \nabla \times (\tilde{\mathbf B} \times \mathbf V) \, \textnormal{d} \mathbf x \textnormal{d} v_\parallel \textnormal{d} \mu + \int \tilde p \nabla \cdot \mathbf V \, \textnormal{d} \mathbf x \qquad \forall \, \mathbf V \in \{H(\textnormal{curl}), H(\textnormal{div}), (H^1)^3\}\,, + \\ + &\frac{\partial \tilde p}{\partial t} = - \nabla \cdot (p_0 \tilde{\mathbf U}) - (\gamma - 1) p_0 \nabla \cdot \tilde{\mathbf U} \,. + \end{aligned} + \right. + + :ref:`time_discret`: Crank-Nicolson (implicit mid-point). System size reduction via :class:`~struphy.linear_algebra.schur_solver.SchurSolver`: + + .. math:: + + \boldsymbol{\rho}^{n+1} - \boldsymbol{\rho}^n = - \frac{\Delta t}{2} \mathbb D \mathcal Q^\alpha (\mathbf u^{n+1} + \mathbf u^n) \,, + + .. 
math:: + + \begin{bmatrix} + \mathbf u^{n+1} - \mathbf u^n \\ \mathbf p^{n+1} - \mathbf p^n + \end{bmatrix} + = \frac{\Delta t}{2} + \begin{bmatrix} + 0 & (\mathbb M^{\alpha,n})^{-1} {\mathcal U^\alpha}^\top \mathbb D^\top \mathbb M_3 \\ - \mathbb D \mathcal S^\alpha - (\gamma - 1) \mathcal K^\alpha \mathbb D \mathcal U^\alpha & 0 + \end{bmatrix} + \begin{bmatrix} + (\mathbf u^{n+1} + \mathbf u^n) \\ (\mathbf p^{n+1} + \mathbf p^n) + \end{bmatrix} + + \begin{bmatrix} + \Delta t (\mathbb M^{\alpha,n})^{-1}\left[\mathbb M^{\alpha,J} \mathbf b^n + \frac{A_\textnormal{h}}{A_b}{\mathcal{T}^B}^\top \mathbb{C}^\top \sum_k^{N_p} \omega_k \mu_k \hat{\mathbf b}¹_0 (\boldsymbol \eta_k) \cdot \left(\frac{1}{\sqrt{g(\boldsymbol \eta_k)}} \vec \Lambda² (\boldsymbol \eta_k) \right)\right] \\ 0 + \end{bmatrix} \,, + + where + :math:`\mathcal U^\alpha`, :math:`\mathcal S^\alpha`, :math:`\mathcal K^\alpha` and :math:`\mathcal Q^\alpha` are :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperators` and + :math:`\mathbb M^{\alpha,n}` and :math:`\mathbb M^{\alpha,J}` are :class:`~struphy.feec.mass.WeightedMassOperators` being weighted with :math:`\rho_0` the MHD equilibrium density. + :math:`\alpha \in \{1, 2, v\}` denotes the :math:`\alpha`-form space where the operators correspond to. + Moreover, :math:`\sum_k^{N_p} \omega_k \mu_k \hat{\mathbf b}¹_0 (\boldsymbol \eta_k) \cdot \left(\frac{1}{\sqrt{g(\boldsymbol \eta_k)}} \vec \Lambda² (\boldsymbol \eta_k)\right)` is accumulated by the kernel :class:`~struphy.pic.accumulation.accum_kernels_gc.cc_lin_mhd_5d_M` and + the time-varying projection operator :math:`\mathcal{T}^B` is defined as + + .. math:: + + \mathcal{T}^B_{(\mu,ijk),(\nu,mno)} := \hat \Pi¹_{(\mu,ijk)} \left[ \epsilon_{\mu \alpha \nu} \frac{\tilde{B}^2_\alpha}{\sqrt{g}} \Lambda²_{\nu,mno} \right] \,. 
+ """ + + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (0, 1), + "repeat": 3, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + n: StencilVector, + u: BlockVector, + p: StencilVector, + *, + particles: Particles5D, + b: BlockVector, + absB0: StencilVector, + unit_b1: BlockVector, + u_space: str, + solver: dict = options(default=True)["solver"], + filter: dict = options(default=True)["filter"], + coupling_params: dict, + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(n, u, p) + + self._particles = particles + self._b = b + self._unit_b1 = unit_b1 + self._absB0 = absB0 + + self._info = solver["info"] + + self._scale_vec = coupling_params["Ah"] / coupling_params["Ab"] + + self._E1T = self.derham.extraction_ops["1"].transpose() + self._unit_b1 = self._E1T.dot(self._unit_b1) + + self._u_id = self.derham.space_to_form[u_space] + if self._u_id == "v": + self._space_key_int = 0 else: - _TT = _T.T + self._space_key_int = int(self._u_id) + + self._boundary_cut_e1 = boundary_cut["e1"] + + self._ACC = Accumulator( + particles, + u_space, + Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_M), + self.mass_ops, + self.domain.args_domain, + add_vector=True, + symmetry="symm", + filter_params=filter, + ) - if self.options.algo == "implicit": - self._info = self.options.solver_params.info + # if self._particles.control_variate: - # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) - self._B = -1 / 2 * _TT @ curl.T @ M2 - self._B2 = -1 / 2 * _TT @ curl.T @ PB.T + # # control variate method is only valid with Maxwellian distributions with "zero perp mean velocity". 
+ # assert isinstance(self._particles.f0, Maxwellian) - self._C = 1 / 2 * curl @ _T + # self._ACC.init_control_variate(self.mass_ops) - # Instantiate Schur solver (constant in this case) - _BC = self._B @ self._C + # # evaluate and save f0.n at quadrature points + # quad_pts = [quad_grid[nquad].points.flatten() + # for quad_grid, nquad in zip(self.derham.get_quad_grids(self.derham.Vh_fem['0']), self.derham.nquads)] - self._schur_solver = SchurSolver( - _A, - _BC, - self.options.solver, - precond=pc, - solver_params=self.options.solver_params, - ) + # n0_at_quad = self.domain.push( + # self._particles.f0.n, *quad_pts, kind='0', squeeze_out=False) - # allocate dummy vectors to avoid temporary array allocations - self._u_tmp1 = self.variables.u.spline.vector.space.zeros() - self._u_tmp2 = self.variables.u.spline.vector.space.zeros() - self._b_tmp1 = self.variables.b.spline.vector.space.zeros() + # # evaluate M0 = unit_b1 (1form) / absB0 (0form) * 2 * vth_perp² at quadrature points + # quad_pts_array = self.domain.prepare_eval_pts(*quad_pts)[:3] - self._byn = self._B.codomain.zeros() - self._tmp_acc = self._B2.codomain.zeros() + # vth_perp = self.particles.f0.vth(*quad_pts_array)[1] + # absB0_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['0'], self._absB0) + + # unit_b1_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['1'], self._unit_b1) + + # self._M0_at_quad = unit_b1_at_quad / absB0_at_quad * vth_perp**2 * n0_at_quad * self._scale_vec + + # define block matrix [[A B], [C I]] (without time step size dt in the diagonals) + id_Mn = "M" + self._u_id + "n" + id_MJ = "M" + self._u_id + "J" + + if self._u_id == "1": + id_S, id_U, id_K, id_Q = "S1", "U1", "K3", "Q1" + elif self._u_id == "2": + id_S, id_U, id_K, id_Q = "S2", None, "K3", "Q2" + elif self._u_id == "v": + id_S, id_U, id_K, id_Q = "Sv", "Uv", "K3", "Qv" + + self._E2T = self.derham.extraction_ops["2"].transpose() + + _A = getattr(self.mass_ops, id_Mn) + _S = getattr(self.basis_ops, id_S) + _K = getattr(self.basis_ops, id_K) + + # initialize projection operator TB + self._initialize_projection_operator_TB() + + if id_U is None: + _U, _UT = IdentityOperator(u.space), IdentityOperator(u.space) else: - self._info = False + _U = getattr(self.basis_ops, id_U) + _UT = _U.T - # define vector field - A_inv = inverse( - _A, - self.options.solver, - pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, - ) - _f1 = A_inv @ _TT @ curl.T @ M2 - _f1_acc = A_inv @ _TT @ curl.T @ PB.T - _f2 = curl @ _T + self._B = -1 / 2.0 * _UT @ self.derham.div.T @ self.mass_ops.M3 + self._C = 1 / 2.0 * (self.derham.div @ _S + 2 / 3.0 * _K @ self.derham.div @ _U) - # allocate output of vector field - out_acc = self.variables.u.spline.vector.space.zeros() - out1 = self.variables.u.spline.vector.space.zeros() - out2 = self.variables.b.spline.vector.space.zeros() + self._MJ = getattr(self.mass_ops, id_MJ) + self._DQ = self.derham.div @ getattr(self.basis_ops, id_Q) - def f1(t, y1, y2, out: BlockVector = out1): - _f1.dot(y2, out=out) - _f1_acc.dot(self._ACC.vectors[0], out=out_acc) - out += out_acc - out.update_ghost_regions() - return out + self._TC = self._TB.T @ self.derham.curl.T - def f2(t, y1, y2, out: BlockVector = out2): - _f2.dot(y1, out=out) - out *= -1.0 - out.update_ghost_regions() - return out + # preconditioner + if solver["type"][1] is None: + pc = None + else: + pc_class = getattr(preconditioner, solver["type"][1]) + pc = 
pc_class(getattr(self.mass_ops, id_Mn)) + + # instantiate Schur solver (constant in this case) + _BC = self._B @ self._C + + self._schur_solver = SchurSolver( + _A, + _BC, + solver["type"][0], + pc=pc, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], + ) - vector_field = {self.variables.u.spline.vector: f1, self.variables.b.spline.vector: f2} - self._ode_solver = ODEsolverFEEC(vector_field, butcher=self.options.butcher) + # allocate dummy vectors to avoid temporary array allocations + self._u_tmp1 = u.space.zeros() + self._u_tmp2 = u.space.zeros() + self._p_tmp1 = p.space.zeros() + self._n_tmp1 = n.space.zeros() + self._byn1 = self._B.codomain.zeros() + self._byn2 = self._B.codomain.zeros() + self._tmp_acc = self._TC.codomain.zeros() def __call__(self, dt): - # update time-dependent operator TB - if self.options.nonlinear: - self._update_weights_TB() + # current variables + nn = self.feec_vars[0] + un = self.feec_vars[1] + pn = self.feec_vars[2] - # current FE coeffs - un = self.variables.u.spline.vector - bn = self.variables.b.spline.vector + # perform accumulation (either with or without control variate) + # if self._particles.control_variate: - # accumulate - self._ACC(self.options.ep_scale) + # self._ACC.accumulate(self._particles, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._scale_vec, 0., + # control_vec=[self._M0_at_quad[0], self._M0_at_quad[1], self._M0_at_quad[2]]) + # else: + # self._ACC.accumulate(self._particles, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._scale_vec, 0.) - if self.options.algo == "implicit": - # solve for new u coeffs (no tmps created here) - byn = self._B.dot(bn, out=self._byn) - b2acc = self._B2.dot(self._ACC.vectors[0], out=self._tmp_acc) - byn += b2acc + self._ACC( + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._scale_vec, + self._boundary_cut_e1, + ) - un1, info = self._schur_solver(un, byn, dt, out=self._u_tmp1) + # update time-dependent operator + self._b.update_ghost_regions() + self._update_weights_TB() - # new b coeffs (no tmps created here) - _u = un.copy(out=self._u_tmp2) - _u += un1 - bn1 = self._C.dot(_u, out=self._b_tmp1) - bn1 *= -dt - bn1 += bn + # solve for new u coeffs (no tmps created here) + byn1 = self._B.dot(pn, out=self._byn1) + byn2 = self._MJ.dot(self._b, out=self._byn2) + b2acc = self._TC.dot(self._ACC.vectors[0], out=self._tmp_acc) + byn2 += b2acc + byn2 *= 1 / 2 + byn1 -= byn2 - diffs = self.update_feec_variables(u=un1, b=bn1) + un1, info = self._schur_solver(un, byn1, dt, out=self._u_tmp1) - else: - self._ode_solver(0.0, dt) + # new p, n, b coeffs (no tmps created here) + _u = un.copy(out=self._u_tmp2) + _u += un1 + pn1 = self._C.dot(_u, out=self._p_tmp1) + pn1 *= -dt + pn1 += pn + + nn1 = self._DQ.dot(_u, out=self._n_tmp1) + nn1 *= -dt / 2 + nn1 += nn + + # write new coeffs into self.feec_vars + max_dn, max_du, max_dp = self.feec_vars_update( + nn1, + un1, + pn1, + ) if self._info and MPI.COMM_WORLD.Get_rank() == 0: - if self.options.algo == "implicit": - print("Status for ShearAlfvenCurrentCoupling5D:", info["success"]) - print("Iterations for ShearAlfvenCurrentCoupling5D:", info["niter"]) - print("Maxdiff up for ShearAlfvenCurrentCoupling5D:", diffs["u"]) - print("Maxdiff b2 for ShearAlfvenCurrentCoupling5D:", diffs["b"]) - print() + print("Status for Magnetosonic:", info["success"]) + print("Iterations for Magnetosonic:", info["niter"]) + 
print("Maxdiff n3 for Magnetosonic:", max_dn) + print("Maxdiff up for Magnetosonic:", max_du) + print("Maxdiff p3 for Magnetosonic:", max_dp) + print() def _initialize_projection_operator_TB(self): r"""Initialize BasisProjectionOperator TB with the time-varying weight. @@ -2184,80 +2150,27 @@ def _initialize_projection_operator_TB(self): # Call the projector and the space P1 = self.derham.P["1"] - Vh = self.derham.Vh_fem[self._u_form] + Vh = self.derham.Vh_fem[self._u_id] # Femfield for the field evaluation self._bf = self.derham.create_spline_function("bf", "Hdiv") + # define temp callable + def tmp(x, y, z): + return 0 * x + # Initialize BasisProjectionOperator if self.derham._with_local_projectors: - self._TB = BasisProjectionOperatorLocal( - P1, - Vh, - [ - [None, None, None], - [None, None, None], - [None, None, None], - ], - transposed=False, - use_cache=True, - polar_shift=True, - V_extraction_op=self.derham.extraction_ops[self._u_form], - V_boundary_op=self.derham.boundary_ops[self._u_form], - P_boundary_op=self.derham.boundary_ops["1"], - ) - self._TBT = BasisProjectionOperatorLocal( - P1, - Vh, - [ - [None, None, None], - [None, None, None], - [None, None, None], - ], - transposed=True, - use_cache=True, - polar_shift=True, - V_extraction_op=self.derham.extraction_ops[self._u_form], - V_boundary_op=self.derham.boundary_ops[self._u_form], - P_boundary_op=self.derham.boundary_ops["1"], - ) + self._TB = BasisProjectionOperatorLocal(P1, Vh, [[tmp, tmp, tmp]]) else: - self._TB = BasisProjectionOperator( - P1, - Vh, - [ - [None, None, None], - [None, None, None], - [None, None, None], - ], - transposed=False, - use_cache=True, - polar_shift=True, - V_extraction_op=self.derham.extraction_ops[self._u_form], - V_boundary_op=self.derham.boundary_ops[self._u_form], - P_boundary_op=self.derham.boundary_ops["1"], - ) - self._TBT = BasisProjectionOperator( - P1, - Vh, - [ - [None, None, None], - [None, None, None], - [None, None, None], - ], - transposed=True, - use_cache=True, - polar_shift=True, - V_extraction_op=self.derham.extraction_ops[self._u_form], - V_boundary_op=self.derham.boundary_ops[self._u_form], - P_boundary_op=self.derham.boundary_ops["1"], - ) + self._TB = BasisProjectionOperator(P1, Vh, [[tmp, tmp, tmp]]) def _update_weights_TB(self): """Updats time-dependent weights of the BasisProjectionOperator TB""" # Update Femfield - self.variables.b.spline.vector.copy(out=self._bf.vector) + self._bf.vector = self._b + self._bf.vector.update_ghost_regions() # define callable weights def bf1(x, y, z): @@ -2275,7 +2188,7 @@ def bf3(x, y, z): fun = [] - if self._u_form == "v": + if self._u_id == "v": for m in range(3): fun += [[]] for n in range(3): @@ -2283,7 +2196,7 @@ def bf3(x, y, z): lambda e1, e2, e3, m=m, n=n: rot_B(e1, e2, e3)[:, :, :, m, n], ] - elif self._u_form == "1": + elif self._u_id == "1": for m in range(3): fun += [[]] for n in range(3): @@ -2309,9 +2222,8 @@ def bf3(x, y, z): / abs(self.domain.jacobian_det(e1, e2, e3, squeeze_out=False)), ] - # update BasisProjectionOperator + # Initialize BasisProjectionOperator self._TB.update_weights(fun) - self._TBT.update_weights(fun) class CurrentCoupling5DDensity(Propagator): @@ -2331,166 +2243,268 @@ class CurrentCoupling5DDensity(Propagator): For the detail explanation of the notations, see `2022_DriftKineticCurrentCoupling `_. 
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space in ("Hcurl", "Hdiv", "H1vec") - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - energetic_ions: PICVariable = None - b_tilde: FEECVariable = None - ep_scale: float = 1.0 - u_space: OptsVecSpace = "Hdiv" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - filter_params: FilterParameters = None - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - assert isinstance(self.energetic_ions, PICVariable) - assert self.energetic_ions.space == "Particles5D" - assert isinstance(self.b_tilde, FEECVariable) - assert isinstance(self.ep_scale, float) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.filter_params is None: - self.filter_params = FilterParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("pbicgstab", "MassMatrixPreconditioner"), + ("bicgstab", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["filter"] = { + "use_filter": None, + "modes": (1), + "repeat": 1, + "alpha": 0.5, + } + dct["boundary_cut"] = { + "e1": 0.0, + "e2": 0.0, + "e3": 0.0, + } + dct["turn_off"] = False + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.u_space == "H1vec": - self._u_form_int = 0 + return dct + + def __init__( + self, + u: BlockVector, + *, + particles: Particles5D, + b: BlockVector, + b_eq: BlockVector, + unit_b1: BlockVector, + curl_unit_b2: BlockVector, + u_space: str, + solver: dict = options(default=True)["solver"], + coupling_params: dict, + epsilon: float = 1.0, + filter: dict = options(default=True)["filter"], + boundary_cut: dict = options(default=True)["boundary_cut"], + ): + super().__init__(u) + + # assert parameters and expose some quantities to self + assert isinstance(particles, (Particles5D)) + + assert u_space in {"Hcurl", "Hdiv", "H1vec"} + + if u_space == "H1vec": + self._space_key_int = 0 else: - self._u_form_int = int(self.derham.space_to_form[self.options.u_space]) + self._space_key_int = int( + self.derham.space_to_form[u_space], + ) - # call operatros - id_M = "M" + self.derham.space_to_form[self.options.u_space] + "n" - self._A = getattr(self.mass_ops, id_M) + self._epsilon = epsilon + self._particles = particles + self._b = b + self._b_eq = b_eq + self._unit_b1 = unit_b1 + self._curl_norm_b = curl_unit_b2 - # magnetic equilibrium field - unit_b1 = self.projected_equil.unit_b1 - curl_unit_b1 = self.projected_equil.curl_unit_b1 - self._b2 = self.projected_equil.b2 + self._info = solver["info"] - # scaling factor - epsilon = 
self.options.energetic_ions.species.equation_params.epsilon + self._scale_mat = coupling_params["Ah"] / coupling_params["Ab"] / self._epsilon - # temporary vectors to avoid memory allocation - self._b_full = self._b2.space.zeros() - self._rhs_v = self.variables.u.spline.vector.space.zeros() - self._u_new = self.variables.u.spline.vector.space.zeros() + self._boundary_cut_e1 = boundary_cut["e1"] - # define Accumulator and arguments - self._ACC = Accumulator( - self.options.energetic_ions.particles, - self.options.u_space, + self._accumulator = Accumulator( + particles, + u_space, Pyccelkernel(accum_kernels_gc.cc_lin_mhd_5d_D), self.mass_ops, self.domain.args_domain, add_vector=False, symmetry="asym", - filter_params=self.options.filter_params, + filter_params=filter, ) - self._args_accum_kernel = ( - epsilon, - self.options.ep_scale, - self._b_full[0]._data, - self._b_full[1]._data, - self._b_full[2]._data, - unit_b1[0]._data, - unit_b1[1]._data, - unit_b1[2]._data, - curl_unit_b1[0]._data, - curl_unit_b1[1]._data, - curl_unit_b1[2]._data, - self._u_form_int, - ) + # if self._particles.control_variate: - # Preconditioner - if self.options.precond is None: - pc = None + # # control variate method is only valid with Maxwellian distributions + # assert isinstance(self._particles.f0, Maxwellian) + # assert params['u_space'] == 'Hdiv' + + # # evaluate and save f0.n / |det(DF)| at quadrature points + # quad_pts = [quad_grid[nquad].points.flatten() + # for quad_grid, nquad in zip(self.derham.get_quad_grids(self.derham.Vh_fem['0']), self.derham.nquads)] + + # self._n0_at_quad = self.domain.push( + # self._particles.f0.n, *quad_pts, kind='3', squeeze_out=False) + + # # prepare field evaluation + # quad_pts_array = self.domain.prepare_eval_pts(*quad_pts)[:3] + + # u0_parallel = self._particles.f0.u(*quad_pts_array)[0] + + # det_df_at_quad = self.domain.jacobian_det(*quad_pts, squeeze_out=False) + + # # evaluate unit_b1 / |det(DF)| at quadrature points + # self._unit_b1_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['1'], self._unit_b1) + # self._unit_b1_at_quad /= det_df_at_quad + + # # evaluate unit_b1 (1form) dot epsilon * f0.u * curl_norm_b (2form) / |det(DF)| at quadrature points + # curl_norm_b_at_quad = WeightedMassOperator.eval_quad(self.derham.Vh_fem['2'], self._curl_norm_b) + + # self._unit_b1_dot_curl_norm_b_at_quad = np.sum(p * q for p, q in zip(self._unit_b1_at_quad, curl_norm_b_at_quad)) + + # self._unit_b1_dot_curl_norm_b_at_quad /= det_df_at_quad + # self._unit_b1_dot_curl_norm_b_at_quad *= self._epsilon + # self._unit_b1_dot_curl_norm_b_at_quad *= u0_parallel + + # # memory allocation for magnetic field at quadrature points + # self._b_quad1 = np.zeros_like(self._n0_at_quad) + # self._b_quad2 = np.zeros_like(self._n0_at_quad) + # self._b_quad3 = np.zeros_like(self._n0_at_quad) + + # # memory allocation for parallel magnetic field at quadrature points + # self._B_para = np.zeros_like(self._n0_at_quad) + + # # memory allocation for control_const at quadrature points + # self._control_const = np.zeros_like(self._n0_at_quad) + + # # memory allocation for self._b_quad x self._nh0_at_quad * self._coupling_const + # self._mat12 = np.zeros_like(self._n0_at_quad) + # self._mat13 = np.zeros_like(self._n0_at_quad) + # self._mat23 = np.zeros_like(self._n0_at_quad) + + # self._mat21 = np.zeros_like(self._n0_at_quad) + # self._mat31 = np.zeros_like(self._n0_at_quad) + # self._mat32 = np.zeros_like(self._n0_at_quad) + + u_id = self.derham.space_to_form[u_space] + self._M = 
getattr(self.mass_ops, "M" + u_id + "n") + + self._E0T = self.derham.extraction_ops["0"].transpose() + self._EuT = self.derham.extraction_ops[u_id].transpose() + self._E1T = self.derham.extraction_ops["1"].transpose() + self._E2T = self.derham.extraction_ops["2"].transpose() + + self._PB = getattr(self.basis_ops, "PB") + self._unit_b1 = self._E1T.dot(self._unit_b1) + + # preconditioner + if solver["type"][1] is None: + self._pc = None else: - pc_class = getattr(preconditioner, self.options.precond) - pc = pc_class(getattr(self.mass_ops, id_M)) + pc_class = getattr(preconditioner, solver["type"][1]) + self._pc = pc_class(self._M) # linear solver - self._A_inv = inverse( - self._A, - self.options.solver, - pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + self._solver = inverse( + self._M, + solver["type"][0], + pc=self._pc, + x0=self.feec_vars[0], + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) + # temporary vectors to avoid memory allocation + self._b_full1 = self._b_eq.space.zeros() + self._b_full2 = self._E2T.codomain.zeros() + self._rhs_v = u.space.zeros() + self._u_new = u.space.zeros() + def __call__(self, dt): - # current FE coeffs - un = self.variables.u.spline.vector + # pointer to old coefficients + un = self.feec_vars[0] # sum up total magnetic field b_full1 = b_eq + b_tilde (in-place) - b_full = self._b2.copy(out=self._b_full) + b_full = self._b_eq.copy(out=self._b_full1) - b_full += self.options.b_tilde.spline.vector - b_full.update_ghost_regions() + if self._b is not None: + b_full += self._b - self._ACC( - *self._args_accum_kernel, + Eb_full = self._E2T.dot(b_full, out=self._b_full2) + Eb_full.update_ghost_regions() + + # perform accumulation (either with or without control variate) + # if self._particles.control_variate: + + # # evaluate magnetic field at quadrature points (in-place) + # WeightedMassOperator.eval_quad(self.derham.Vh_fem['2'], self._b_full2, + # out=[self._b_quad1, self._b_quad2, self._b_quad3]) + + # # evaluate B_parallel + # self._B_para = np.sum(p * q for p, q in zip(self._unit_b1_at_quad, [self._b_quad1, self._b_quad2, self._b_quad3])) + + # # evaluate coupling_const 1 - B_parallel / B^star_parallel + # self._control_const = 1 - (self._B_para / (self._B_para + self._unit_b1_dot_curl_norm_b_at_quad)) + + # # assemble (B x) + # self._mat12[:, :, :] = self._scale_mat * \ + # self._b_quad3 * self._n0_at_quad * self._control_const + # self._mat13[:, :, :] = -self._scale_mat * \ + # self._b_quad2 * self._n0_at_quad * self._control_const + # self._mat23[:, :, :] = self._scale_mat * \ + # self._b_quad1 * self._n0_at_quad * self._control_const + + # self._mat21[:, :, :] = -self._mat12 + # self._mat31[:, :, :] = -self._mat13 + # self._mat32[:, :, :] = -self._mat23 + + # self._accumulator.accumulate(self._particles, self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, self._unit_b1[1]._data, self._unit_b1[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # self._space_key_int, self._scale_mat, 0.1, + # control_mat=[[None, self._mat12, self._mat13], + # [self._mat21, None, self._mat23], + # [self._mat31, self._mat32, None]]) + # else: + # self._accumulator.accumulate(self._particles, self._epsilon, + # Eb_full[0]._data, Eb_full[1]._data, Eb_full[2]._data, + # self._unit_b1[0]._data, self._unit_b1[1]._data, 
self._unit_b1[2]._data, + # self._curl_norm_b[0]._data, self._curl_norm_b[1]._data, self._curl_norm_b[2]._data, + # self._space_key_int, self._scale_mat, 0.) + + self._accumulator( + self._epsilon, + Eb_full[0]._data, + Eb_full[1]._data, + Eb_full[2]._data, + self._unit_b1[0]._data, + self._unit_b1[1]._data, + self._unit_b1[2]._data, + self._curl_norm_b[0]._data, + self._curl_norm_b[1]._data, + self._curl_norm_b[2]._data, + self._space_key_int, + self._scale_mat, + self._boundary_cut_e1, ) # define system (M - dt/2 * A)*u^(n + 1) = (M + dt/2 * A)*u^n - lhs = self._A - dt / 2 * self._ACC.operators[0] - rhs = self._A + dt / 2 * self._ACC.operators[0] + lhs = self._M - dt / 2 * self._accumulator.operators[0] + rhs = self._M + dt / 2 * self._accumulator.operators[0] # solve linear system for updated u coefficients (in-place) rhs = rhs.dot(un, out=self._rhs_v) - self._A_inv.linop = lhs + self._solver.linop = lhs - _u = self._A_inv.solve(rhs, out=self._u_new) - info = self._A_inv._info + un1 = self._solver.solve(rhs, out=self._u_new) + info = self._solver._info - diffs = self.update_feec_variables(u=_u) + # write new coeffs into Propagator.variables + max_du = self.feec_vars_update(un1) - if self.options.solver_params.info and MPI.COMM_WORLD.Get_rank() == 0: + if self._info and MPI.COMM_WORLD.Get_rank() == 0: print("Status for CurrentCoupling5DDensity:", info["success"]) print("Iterations for CurrentCoupling5DDensity:", info["niter"]) - print("Maxdiff up for CurrentCoupling5DDensity:", diffs["u"]) + print("Maxdiff up for CurrentCoupling5DDensity:", max_du) print() @@ -2524,134 +2538,129 @@ class ImplicitDiffusion(Propagator): * :math:`\sigma_1=\sigma_2=0` and :math:`\sigma_3 = \Delta t`: **Poisson solver** with a given charge density :math:`\sum_i\rho_i`. * :math:`\sigma_2=0` and :math:`\sigma_1 = \sigma_3 = \Delta t` : Poisson with **adiabatic electrons**. * :math:`\sigma_1=\sigma_2=1` and :math:`\sigma_3 = 0`: **Implicit heat equation**. + + Parameters + ---------- + phi : StencilVector + FE coefficients of the solution as a discrete 0-form. + + sigma_1, sigma_2, sigma_3 : float | int + Equation parameters. + + divide_by_dt : bool + Whether to divide the sigmas by dt during __call__. + + stab_mat : str + Name of the matrix :math:`M^0_{n_0}`. + + diffusion_mat : str + Name of the matrix :math:`M^1_{D_0}`. + + rho : StencilVector or tuple or list + (List of) right-hand side FE coefficients of a 0-form (optional, can be set with a setter later). + Can be either a) StencilVector or b) 2-tuple, or a list of those. + In case b) the first tuple entry must be :class:`~struphy.pic.accumulation.particles_to_grid.AccumulatorVector`, + and the second entry must be :class:`~struphy.pic.base.Particles`. + + x0 : StencilVector + Initial guess for the iterative solver (optional, can be set with a setter later). + + solver : dict + Parameters for the iterative solver (see ``__init__`` for details). 
""" - class Variables: - def __init__(self): - self._phi: FEECVariable = None - - @property - def phi(self) -> FEECVariable: - return self._phi - - @phi.setter - def phi(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1" - self._phi = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsStabMat = Literal["M0", "M0ad", "Id"] - OptsDiffusionMat = Literal["M1", "M1perp"] - # propagator options - sigma_1: float = 1.0 - sigma_2: float = 0.0 - sigma_3: float = 1.0 - divide_by_dt: bool = False - stab_mat: OptsStabMat = "M0" - diffusion_mat: OptsDiffusionMat = "M1" - rho: FEECVariable | Callable | tuple[AccumulatorVector, Particles] | list = None - rho_coeffs: float | list = None - x0: StencilVector = None - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.stab_mat, self.OptsStabMat) - check_option(self.diffusion_mat, self.OptsDiffusionMat) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["model"] = { + "sigma_1": 1.0, + "sigma_2": 0.0, + "sigma_3": 1.0, + "stab_mat": ["M0", "M0ad", "Id"], + "diffusion_mat": ["M1", "M1perp"], + } + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # always stabilize - if xp.abs(self.options.sigma_1) < 1e-14: - self.options.sigma_1 = 1e-14 - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"Stabilizing Poisson solve with {self.options.sigma_1 =}") + return dct - # model parameters - self._sigma_1 = self.options.sigma_1 - self._sigma_2 = self.options.sigma_2 - self._sigma_3 = self.options.sigma_3 - self._divide_by_dt = self.options.divide_by_dt + def __init__( + self, + phi: StencilVector, + *, + sigma_1: float = options()["model"]["sigma_1"], + sigma_2: float = options()["model"]["sigma_2"], + sigma_3: float = options()["model"]["sigma_3"], + divide_by_dt: bool = False, + stab_mat: str = options(default=True)["model"]["stab_mat"], + diffusion_mat: str = options(default=True)["model"]["diffusion_mat"], + rho: StencilVector | tuple | list | Callable = None, + x0: StencilVector = None, + solver: dict = options(default=True)["solver"], + ): + assert phi.space == self.derham.Vh["0"] - phi = self.variables.phi.spline.vector + super().__init__(phi) - # collect rhs - def verify_rhs(rho) -> StencilVector | FEECVariable | AccumulatorVector: - """Perform preliminary operations on rho to comute the rhs and return the result.""" - if rho is None: - rhs = phi.space.zeros() - elif isinstance(rho, FEECVariable): - assert rho.space == "H1" - rhs = rho - elif isinstance(rho, AccumulatorVector): - rhs = rho - elif isinstance(rho, Callable): - 
rhs = L2Projector("H1", self.mass_ops).get_dofs(rho, apply_bc=True) - else: - raise TypeError(f"{type(rho) =} is not accepted.") + # always stabilize + if np.abs(sigma_1) < 1e-14: + sigma_1 = 1e-14 + print(f"Stabilizing Poisson solve with {sigma_1 = }") - return rhs + # model parameters + self._sigma_1 = sigma_1 + self._sigma_2 = sigma_2 + self._sigma_3 = sigma_3 + self._divide_by_dt = divide_by_dt - rho = self.options.rho - if isinstance(rho, list): - self._sources = [] - for r in rho: - self._sources += [verify_rhs(r)] + # collect rhs + if rho is None: + self._rho = [phi.space.zeros()] else: - self._sources = [verify_rhs(rho)] - - # coeffs of rhs - if self.options.rho_coeffs is not None: - if isinstance(self.options.rho_coeffs, (list, tuple)): - self._coeffs = self.options.rho_coeffs + if isinstance(rho, list): + for r in rho: + if isinstance(r, tuple): + assert isinstance(r[0], AccumulatorVector) + assert isinstance(r[1], Particles) + # assert r.space_id == 'H1' + else: + assert r.space == phi.space + elif isinstance(rho, tuple): + assert isinstance(rho[0], AccumulatorVector) + assert isinstance(rho[1], Particles) + # assert rho[0].space_id == 'H1' + rho = [rho] + elif isinstance(rho, Callable): + rho = [rho()] else: - self._coeffs = [self.options.rho_coeffs] - assert len(self._coeffs) == len(self._sources) - else: - self._coeffs = [1.0 for src in self.sources] + assert rho.space == phi.space + rho = [rho] + self._rho = rho # initial guess and solver params - self._x0 = self.options.x0 - self._info = self.options.solver_params.info + self._x0 = x0 + self._info = solver["info"] - if self.options.stab_mat == "Id": + if stab_mat == "Id": stab_mat = IdentityOperator(phi.space) else: - stab_mat = getattr(self.mass_ops, self.options.stab_mat) + stab_mat = getattr(self.mass_ops, stab_mat) - if isinstance(self.options.diffusion_mat, str): - diffusion_mat = getattr(self.mass_ops, self.options.diffusion_mat) + print(f"{diffusion_mat = }") + if isinstance(diffusion_mat, str): + diffusion_mat = getattr(self.mass_ops, diffusion_mat) else: - diffusion_mat = self.options.diffusion_mat assert isinstance(diffusion_mat, WeightedMassOperator) assert diffusion_mat.domain == self.derham.grad.codomain assert diffusion_mat.codomain == self.derham.grad.codomain @@ -2661,7 +2670,7 @@ def verify_rhs(rho) -> StencilVector | FEECVariable | AccumulatorVector: self._diffusion_op = self.derham.grad.T @ diffusion_mat @ self.derham.grad # preconditioner and solver for Ax=b - if self.options.precond is None: + if solver["type"][1] is None: pc = None else: # TODO: waiting for multigrid preconditioner @@ -2670,56 +2679,79 @@ def verify_rhs(rho) -> StencilVector | FEECVariable | AccumulatorVector: # solver just with A_2, but will be set during call with dt self._solver = inverse( self._diffusion_op, - self.options.solver, + solver["type"][0], pc=pc, x0=self.x0, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, - recycle=self.options.solver_params.recycle, + tol=solver["tol"], + maxiter=solver["maxiter"], + verbose=solver["verbose"], + recycle=solver["recycle"], ) # allocate memory for solution self._tmp = phi.space.zeros() self._rhs = phi.space.zeros() self._rhs2 = phi.space.zeros() - self._tmp_src = phi.space.zeros() @property - def sources(self) -> list[StencilVector | FEECVariable | AccumulatorVector]: + def rho(self): """ - Right-hand side of the equation (sources). + (List of) right-hand side FE coefficients of a 0-form. 
+ The list entries can be either a) StencilVectors or b) 2-tuples; + in the latter case, the first tuple entry must be :class:`~struphy.pic.accumulation.particles_to_grid.AccumulatorVector`, + and the second entry must be :class:`~struphy.pic.base.Particles`. """ - return self._sources + return self._rho - @property - def coeffs(self) -> list[float]: - """ - Same length as self.sources. Coefficients multiplied with sources before solve (default is 1.0). + @rho.setter + def rho(self, value): + """In-place setter for StencilVector/PolarVector. + If rho is a list, len(value) msut be len(rho) and value can contain None. """ - return self._coeffs + if isinstance(value, list): + assert len(value) == len(self.rho) + for i, (val, r) in enumerate(zip(value, self.rho)): + if val is None: + continue + elif isinstance(val, tuple): + assert isinstance(val[0], AccumulatorVector) + assert isinstance(val[1], Particles) + assert isinstance(r, tuple) + self._rho[i] = val + else: + assert val.space == r.space + r[:] = val[:] + elif isinstance(ValueError, tuple): + assert isinstance(value[0], AccumulatorVector) + assert isinstance(value[1], Particles) + assert len(self.rho) == 1 + # assert rho[0].space_id == 'H1' + self._rho[0] = value + else: + assert value.space == self.derham.Vh["0"] + assert len(self.rho) == 1 + self._rho[0][:] = value[:] @property def x0(self): """ psydac.linalg.stencil.StencilVector or struphy.polar.basic.PolarVector. First guess of the iterative solver. """ - return self.options.x0 + return self._x0 @x0.setter - def x0(self, value: StencilVector): + def x0(self, value): """In-place setter for StencilVector/PolarVector. First guess of the iterative solver.""" assert value.space == self.derham.Vh["0"] assert value.space.symbolic_space == "H1", ( f"Right-hand side must be in H1, but is in {value.space.symbolic_space}." ) - if self.options.x0 is None: - self.options.x0 = value + if self._x0 is None: + self._x0 = value else: - self.options.x0[:] = value[:] + self._x0[:] = value[:] - @profile def __call__(self, dt): # set parameters if self._divide_by_dt: @@ -2732,20 +2764,17 @@ def __call__(self, dt): sig_3 = self._sigma_3 # compute rhs - phin = self.variables.phi.spline.vector + phin = self.feec_vars[0] rhs = self._stab_mat.dot(phin, out=self._rhs) rhs *= sig_2 self._rhs2 *= 0.0 - for src, coeff in zip(self.sources, self.coeffs): - if isinstance(src, StencilVector): - self._rhs2 += sig_3 * coeff * src - elif isinstance(src, FEECVariable): - v = src.spline.vector - self._rhs2 += sig_3 * coeff * self.mass_ops.M0.dot(v, out=self._tmp_src) - elif isinstance(src, AccumulatorVector): - src() # accumulate - self._rhs2 += sig_3 * coeff * src.vectors[0] + for rho in self._rho: + if isinstance(rho, tuple): + rho[0]() # accumulate + self._rhs2 += sig_3 * rho[0].vectors[0] + else: + self._rhs2 += sig_3 * rho rhs += self._rhs2 @@ -2759,7 +2788,7 @@ def __call__(self, dt): if self._info: print(info) - self.update_feec_variables(phi=out) + self.feec_vars_update(out) class Poisson(ImplicitDiffusion): @@ -2807,52 +2836,51 @@ class Poisson(ImplicitDiffusion): Parameters for the iterative solver (see ``__init__`` for details). 
""" - @dataclass - class Options: - # specific literals - OptsStabMat = Literal["M0", "M0ad", "Id"] - # propagator options - stab_eps: float = 0.0 - stab_mat: OptsStabMat = "Id" - rho: FEECVariable | Callable | tuple[AccumulatorVector, Particles] | list = None - rho_coeffs: float | list = None - x0: StencilVector = None - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.stab_mat, self.OptsStabMat) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - # Poisson solve (-> set some params of parent class) - self.sigma_1 = self.stab_eps - self.sigma_2 = 0.0 - self.sigma_3 = 1.0 - self.divide_by_dt = False - self.diffusion_mat = "M1" + @staticmethod + def options(default=False): + dct = {} + dct["stabilization"] = { + "stab_eps": 0.0, + "stab_mat": ["Id", "M0", "M0ad"], + } + dct["solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + + return dct - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - if "sigma" not in k and k not in ("divide_by_dt", "diffusion_mat"): - print(f" {k}: {v}") - self._options = new + def __init__( + self, + phi: StencilVector, + *, + stab_eps: float = 0.0, + stab_mat: str = options(default=True)["stabilization"]["stab_mat"], + rho: StencilVector | tuple | list | Callable = None, + x0: StencilVector = None, + solver: dict = options(default=True)["solver"], + ): + super().__init__( + phi, + sigma_1=stab_eps, + sigma_2=0.0, + sigma_3=1.0, + divide_by_dt=False, + stab_mat=stab_mat, + diffusion_mat="M1", + rho=rho, + x0=x0, + solver=solver, + ) class VariationalMomentumAdvection(Propagator): @@ -2885,73 +2913,50 @@ class VariationalMomentumAdvection(Propagator): \hat{\mathbf{u}}_h^{n+1/2} = (\mathbf{u}^{n+1/2})^\top \vec{\boldsymbol \Lambda}^v \in (V_h^0)^3 \,, \qquad \hat{\mathbf A}^1_{\mu,h} = \nabla P_\mu((\mathbf u^{n+1/2})^\top \vec{\boldsymbol \Lambda}^v)] \in V_h^1\,, \qquad \hat{\rho}_h^{n} = (\rho^{n})^\top \vec{\boldsymbol \Lambda}^3 \in V_h^3 \,. 
""" - class Variables: - def __init__(self): - self._u: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "type": ["Newton", "Picard"], + "info": False, + } + if default: + dct = descend_options_dict(dct, []) + return dct - def __init__(self): - self.variables = self.Variables() + def __init__( + self, + u: BlockVector, + *, + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + ): + super().__init__(u) - @dataclass - class Options: - # propagator options - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None + assert mass_ops is not None - def __post_init__(self): - # checks - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) - - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + self._Mrho = mass_ops self._initialize_mass() # bunch of temporaries to avoid allocating in the loop - u = self.variables.u.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un12 = u.space.zeros() self._tmp_diff = u.space.zeros() @@ -2967,25 +2972,25 @@ def allocate(self): self.inv_derivative = inverse( self._Mrho.inv @ self.derivative, "gmres", - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) def __call__(self, dt): - if self._nonlin_solver.type == "Newton": + if self._nonlin_solver["type"] == "Newton": self.__call_newton(dt) - elif self._nonlin_solver.type == "Picard": + elif self._nonlin_solver["type"] == "Picard": self.__call_picard(dt) def __call_newton(self, dt): # Initialize variable for Newton iteration - un = self.variables.u.spline.vector + un = self.feec_vars[0] mn = self._Mrho.massop.dot(un, out=self._tmp_mn) mn1 = mn.copy(out=self._tmp_mn1) un1 = un.copy(out=self._tmp_un1) - tol = self.options.nonlin_solver.tol + tol = 
self._nonlin_solver["tol"] err = tol + 1 # Jacobian matrix for Newton solve self._dt2_brack._scalar = dt / 2 @@ -2993,7 +2998,7 @@ def __call_newton(self, dt): print() print("Newton iteration in VariationalMomentumAdvection") - for it in range(self.options.nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): un12 = un.copy(out=self._tmp_un12) un12 += un1 un12 *= 0.5 @@ -3013,7 +3018,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Newton step @@ -3027,26 +3032,26 @@ def __call_newton(self, dt): un1 -= update mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self.options.nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!WARNING: Maximum iteration in VariationalMomentumAdvection reached - not converged \n {err =} \n {tol**2 =}", + f"!!!WARNING: Maximum iteration in VariationalMomentumAdvection reached - not converged \n {err = } \n {tol**2 = }", ) - self.update_feec_variables(u=un1) + self.feec_vars_update(un1) def __call_picard(self, dt): # Initialize variable for Picard iteration - un = self.variables.u.spline.vector + un = self.feec_vars[0] mn = self._Mrho.massop.dot(un, out=self._tmp_mn) mn1 = mn.copy(out=self._tmp_mn1) un1 = un.copy(out=self._tmp_un1) - tol = self.options.nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 # Jacobian matrix for Newton solve - for it in range(self.options.nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Picard iteration - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # half time step approximation un12 = un.copy(out=self._tmp_un12) @@ -3073,12 +3078,12 @@ def __call_picard(self, dt): # Inverse the mass matrix to get the velocity un1 = self._Mrho.inv.dot(mn1, out=self._tmp_un1) - if it == self.options.nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!WARNING: Maximum iteration in VariationalMomentumAdvection reached - not converged \n {err =} \n {tol**2 =}", + f"!!!WARNING: Maximum iteration in VariationalMomentumAdvection reached - not converged \n {err = } \n {tol**2 = }", ) - self.update_feec_variables(u=un1) + self.feec_vars_update(un1) def _initialize_mass(self): """Initialization of the mass matrix solver""" @@ -3144,45 +3149,55 @@ class VariationalDensityEvolve(Propagator): .. math:: \hat{l}^3(f)_{ijk}=\int_{\hat{\Omega}} f \Lambda^3_{ijk} \textrm d \boldsymbol \eta - and the weights in the the :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperator` and the :class:`~struphy.feec.mass.WeightedMassOperator` are given by + and the weights in the the :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperator` and the :class:`~struphy.feec.mass.WeightedMassOperator` are given by + + .. math:: + + \hat{\mathbf{u}}_h^{k} = (\mathbf{u}^{k})^\top \vec{\boldsymbol \Lambda}^v \in (V_h^0)^3 \, \text{for k in} \{n, n+1/2, n+1\}, \qquad \hat{\rho}_h^{k} = (\rho^{k})^\top \vec{\boldsymbol \Lambda}^3 \in V_h^3 \, \text{for k in} \{n, n+1/2, n+1\} . 
+ """ + + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + "recycle": True, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "info": False, + "linearize": False, + } + dct["physics"] = {"gamma": 5 / 3} + + if default: + dct = descend_options_dict(dct, []) - .. math:: + return dct - \hat{\mathbf{u}}_h^{k} = (\mathbf{u}^{k})^\top \vec{\boldsymbol \Lambda}^v \in (V_h^0)^3 \, \text{for k in} \{n, n+1/2, n+1\}, \qquad \hat{\rho}_h^{k} = (\rho^{k})^\top \vec{\boldsymbol \Lambda}^3 \in V_h^3 \, \text{for k in} \{n, n+1/2, n+1\} . - """ + def __init__( + self, + rho: StencilVector, + u: BlockVector, + *, + model: str = "barotropic", + gamma: float = options()["physics"]["gamma"], + s: StencilVector = None, + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + energy_evaluator: InternalEnergyEvaluator = None, + ): + super().__init__(rho, u) - class Variables: - def __init__(self): - self._rho: FEECVariable = None - self._u: FEECVariable = None - - @property - def rho(self) -> FEECVariable: - return self._rho - - @rho.setter - def rho(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._rho = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal[ + assert model in [ "pressureless", "barotropic", "full", @@ -3193,69 +3208,27 @@ class Options: "linear_q", "deltaf_q", ] - # propagator options - model: OptsModel = "barotropic" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - s: FEECVariable = None - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters() + if model == "full": + assert s is not None + assert mass_ops is not None - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.model == "full": - assert self.options.s is not None - - self._model = self.options.model - self._gamma = self.options.gamma - self._s = self.options.s - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize = self.options.nonlin_solver.linearize - - self._info = self.options.nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) - - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = 
MassMatrixDiagonalPreconditioner(self._Mrho.massop) + self._model = model + self._gamma = gamma + self._s = s + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._linearize = self._nonlin_solver["linearize"] + + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) + + self._Mrho = mass_ops # Femfields for the projector self.rhof = self.derham.create_spline_function("rhof", "L2") self.rhof1 = self.derham.create_spline_function("rhof1", "L2") - rho = self.variables.rho.spline.vector - u = self.variables.u.spline.vector - # Projector - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) + self._energy_evaluator = energy_evaluator self._kinetic_evaluator = KineticEnergyEvaluator(self.derham, self.domain, self.mass_ops) self._initialize_projectors_and_mass() if self._model in ["linear", "linear_q"]: @@ -3291,7 +3264,6 @@ def allocate(self): if self._model in ["linear", "linear_q"]: self._update_Pirho(self.projected_equil.n3) - @profile def __call__(self, dt): self.__call_newton(dt) @@ -3303,15 +3275,15 @@ def __call_newton(self, dt): print("Newton iteration in VariationalDensityEvolve") # Initial variables - rhon = self.variables.rho.spline.vector - un = self.variables.u.spline.vector + rhon = self.feec_vars[0] + un = self.feec_vars[1] if self._model in ["linear", "linear_q"]: advection = self.divPirho.dot(un, out=self._tmp_rho_advection) advection *= dt rhon1 = rhon.copy(out=self._tmp_rhon1) rhon1 -= advection - self.update_feec_variables(rho=rhon1, u=un) + self.feec_vars_update(rhon1, un) return if self._model in ["deltaf", "deltaf_q"]: @@ -3324,7 +3296,7 @@ def __call_newton(self, dt): # Initialize variable for Newton iteration if self._model == "full": - s = self._s.spline.vector + s = self._s else: s = None @@ -3346,10 +3318,10 @@ def __call_newton(self, dt): un1 = un.copy(out=self._tmp_un1) un1 += self._tmp_un_diff mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - tol = self._nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 - for it in range(self._nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Newton iteration un12 = un.copy(out=self._tmp_un12) @@ -3395,7 +3367,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Derivative for Newton @@ -3425,14 +3397,14 @@ def __call_newton(self, dt): mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self._nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalDensityEvolve reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalDensityEvolve reached - not converged:\n {err = } \n {tol**2 = }", ) self._tmp_un_diff = un1 - un self._tmp_rhon_diff = rhon1 - rhon - self.update_feec_variables(rho=rhon1, u=un1) + self.feec_vars_update(rhon1, un1) def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and `CoordinateProjector` needed to compute the bracket term""" @@ -3468,7 +3440,7 @@ def _initialize_projectors_and_mass(self): # tmps grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._rhof_values = xp.zeros(grid_shape, dtype=float) + self._rhof_values = np.zeros(grid_shape, dtype=float) # Other mass matrices for newton solve self._M_drho = self.mass_ops.create_weighted_mass("L2", "L2") @@ 
-3501,17 +3473,17 @@ def _initialize_projectors_and_mass(self): self._Jacobian, "pbicgstab", pc=self._Mrho.inv, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) # self._inv_Jacobian = inverse(self._Jacobian, # 'gmres', - # tol=self._lin_solver.tol, - # maxiter=self._lin_solver.maxiter, - # verbose=self._lin_solver.verbose, + # tol=self._lin_solver['tol'], + # maxiter=self._lin_solver['maxiter'], + # verbose=self._lin_solver['verbose'], # recycle=True) # L2-projector for V3 @@ -3520,20 +3492,20 @@ def _initialize_projectors_and_mass(self): grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) # tmps - self._eval_dl_drho = xp.zeros(grid_shape, dtype=float) + self._eval_dl_drho = np.zeros(grid_shape, dtype=float) - self._uf_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._uf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._uf_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._uf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) - self._tmp_int_grid2 = xp.zeros(grid_shape, dtype=float) - self._rhof_values = xp.zeros(grid_shape, dtype=float) - self._rhof1_values = xp.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) + self._tmp_int_grid2 = np.zeros(grid_shape, dtype=float) + self._rhof_values = np.zeros(grid_shape, dtype=float) + self._rhof1_values = np.zeros(grid_shape, dtype=float) if self._model == "full": - self._tmp_de_drho = xp.zeros(grid_shape, dtype=float) + self._tmp_de_drho = np.zeros(grid_shape, dtype=float) gam = self._gamma - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -3541,7 +3513,7 @@ def _initialize_projectors_and_mass(self): ) self._proj_rho2_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -3550,7 +3522,7 @@ def _initialize_projectors_and_mass(self): self._proj_drho_metric_term = deepcopy(metric) if self._linearize: - self._init_dener_drho = xp.zeros(grid_shape, dtype=float) + self._init_dener_drho = np.zeros(grid_shape, dtype=float) def _update_Pirho(self, rho): """Update the weights of the `BasisProjectionOperator` Pirho""" @@ -3564,7 +3536,7 @@ def _update_weighted_MM(self, rho): self._Mrho.update_weight(rho) def _update_linear_form_dl_drho(self, rhon, rhon1, un, un1, sn): """Update the linearform representing integration in V3 against kinetic energy""" self._kinetic_evaluator.get_u2_grid(un, un1, self._eval_dl_drho) @@ -3605,15 +3577,11 @@ def _update_linear_form_dl_drho(self, rhon, rhon1, un, un1, sn): def _compute_init_linear_form(self): if abs(self._gamma - 5 / 3) < 1e-3: self._energy_evaluator.evaluate_exact_de_drho_grid( - self.projected_equil.n3, - self.projected_equil.s3_monoatomic, - out=self._init_dener_drho, + self.projected_equil.n3, self.projected_equil.s3_monoatomic, out=self._init_dener_drho ) elif abs(self._gamma - 7 / 5) < 1e-3: self._energy_evaluator.evaluate_exact_de_drho_grid( - self.projected_equil.n3, - self.projected_equil.s3_diatomic, - out=self._init_dener_drho, + self.projected_equil.n3, self.projected_equil.s3_diatomic, out=self._init_dener_drho ) else: raise ValueError("Gamma should be 7/5 or 5/3 if you want to linearize")
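
The hunks above precompute powers of the mapping's Jacobian determinant on the integration grid once and cache them as projection weights. A self-contained sketch of this idea with NumPy (all names and the exponents are placeholders; the actual exponents sit on context lines elided from this diff):

import numpy as np

gamma = 5.0 / 3.0
eta = [np.linspace(0.0, 1.0, 4)] * 3
e1, e2, e3 = np.meshgrid(*eta, indexing="ij")

# Stand-in for self.domain.jacobian_det(*integration_grid): some strictly
# positive field on the (eta1, eta2, eta3) integration grid.
det_df = 1.0 + 0.1 * e1

# Cached metric weights, reused in every projector update instead of
# re-evaluating the mapping (placeholder exponents).
proj_rho2_metric_term = np.power(det_df, 1.0 - gamma)
proj_drho_metric_term = np.power(det_df, 2.0 - gamma)

Evaluating these arrays once in `_initialize_projectors_and_mass` is what keeps the per-time-step updates allocation-free, together with the preallocated `np.zeros(grid_shape)` temporaries.
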
@@ -3694,100 +3662,67 @@ class VariationalEntropyEvolve(Propagator): \hat{\mathbf{u}}_h^{k} = (\mathbf{u}^{k})^\top \vec{\boldsymbol \Lambda}^v \in (V_h^0)^3 \, \text{for k in} \{n, n+1/2, n+1\}, \qquad \hat{s}_h^{k} = (s^{k})^\top \vec{\boldsymbol \Lambda}^3 \in V_h^3 \, \text{for k in} \{n, n+1/2, n+1\} \qquad \hat{\rho}_h^{n} = (\rho^{n})^\top \vec{\boldsymbol \Lambda}^3 \in V_h^3 \,. """ - class Variables: - def __init__(self): - self._s: FEECVariable = None - self._u: FEECVariable = None - - @property - def s(self) -> FEECVariable: - return self._s - - @s.setter - def s(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._s = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal["full"] - # propagator options - model: OptsModel = "full" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - rho: FEECVariable = None - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "info": False, + "linearize": False, + } + dct["physics"] = {"gamma": 5 / 3} + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.model == "full": - assert self.options.rho is not None - - self._model = self.options.model - self._gamma = self.options.gamma - self._rho = self.options.rho - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize = self.options.nonlin_solver.linearize - - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) - - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + return dct + + def __init__( + self, + s: StencilVector, + u: BlockVector, + *, + model: str = "full", + gamma: float = options()["physics"]["gamma"], + rho: StencilVector, + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + energy_evaluator: InternalEnergyEvaluator = None, + ): + super().__init__(s, u) + + assert model in ["full"] + if model == "full": + assert rho is not
None + assert mass_ops is not None + + self._model = model + self._gamma = gamma + self._rho = rho + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._linearize = self._nonlin_solver["linearize"] + + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) + + self._Mrho = mass_ops # Projector - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) + self._energy_evaluator = energy_evaluator self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - s = self.variables.s.spline.vector - u = self.variables.u.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un2 = u.space.zeros() self._tmp_un12 = u.space.zeros() @@ -3815,12 +3750,12 @@ def __call_newton(self, dt): if self._info: print() print("Newton iteration in VariationalEntropyEvolve") - sn = self.variables.s.spline.vector - un = self.variables.u.spline.vector + sn = self.feec_vars[0] + un = self.feec_vars[1] sn1 = sn.copy(out=self._tmp_sn1) # Initialize variable for Newton iteration - rho = self._rho.spline.vector + rho = self._rho self._update_Pis(sn) mn = self._Mrho.massop.dot(un, out=self._tmp_mn) @@ -3829,10 +3764,10 @@ def __call_newton(self, dt): un1 = un.copy(out=self._tmp_un1) un1 += self._tmp_un_diff mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - tol = self._nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 - for it in range(self._nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Newton iteration un12 = un.copy(out=self._tmp_un12) @@ -3870,7 +3805,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Derivative for Newton @@ -3892,13 +3827,13 @@ def __call_newton(self, dt): # Multiply by the mass matrix to get the momentum mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self._nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalEntropyEvolve reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalEntropyEvolve reached - not converged:\n {err = } \n {tol**2 = }", ) self._tmp_sn_diff = sn1 - sn self._tmp_un_diff = un1 - un - self.update_feec_variables(s=sn1, u=un1) + self.feec_vars_update(sn1, un1) def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and `CoordinateProjector` needed to compute the bracket term""" @@ -3951,19 +3886,19 @@ def _initialize_projectors_and_mass(self): self._inv_Jacobian = SchurSolverFull( self._Jacobian, - self.options.solver, + self._lin_solver["type"][0], pc=self._Mrho.inv, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) # self._inv_Jacobian = inverse(self._Jacobian, # 'gmres', - # tol=self._lin_solver.tol, - # maxiter=self._lin_solver.maxiter, - # verbose=self._lin_solver.verbose, + # tol=self._lin_solver['tol'], + # maxiter=self._lin_solver['maxiter'], + # verbose=self._lin_solver['verbose'], # recycle=True) # prepare for integration of linear form @@ -3979,15 +3914,15 @@ def _initialize_projectors_and_mass(self): ) grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) 
+ self._tmp_int_grid = np.zeros(grid_shape, dtype=float) if self._model == "full": - self._tmp_de_ds = xp.zeros(grid_shape, dtype=float) + self._tmp_de_ds = np.zeros(grid_shape, dtype=float) if self._linearize: - self._init_dener_ds = xp.zeros(grid_shape, dtype=float) + self._init_dener_ds = np.zeros(grid_shape, dtype=float) gam = self._gamma - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -3995,7 +3930,7 @@ def _initialize_projectors_and_mass(self): ) self._proj_rho2_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -4028,15 +3963,11 @@ def _update_linear_form_dl_ds(self, rhon, sn, sn1): def _compute_init_linear_form(self): if abs(self._gamma - 5 / 3) < 1e-3: self._energy_evaluator.evaluate_exact_de_ds_grid( - self.projected_equil.n3, - self.projected_equil.s3_monoatomic, - out=self._init_dener_ds, + self.projected_equil.n3, self.projected_equil.s3_monoatomic, out=self._init_dener_ds ) elif abs(self._gamma - 7 / 5) < 1e-3: self._energy_evaluator.evaluate_exact_de_ds_grid( - self.projected_equil.n3, - self.projected_equil.s3_diatomic, - out=self._init_dener_ds, + self.projected_equil.n3, self.projected_equil.s3_diatomic, out=self._init_dener_ds ) else: raise ValueError("Gamma should be 7/5 or 5/3 if you want to linearize") @@ -4101,91 +4032,58 @@ class VariationalMagFieldEvolve(Propagator): """ - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - OptsModel = Literal["full", "full_p", "linear"] - # propagator options - model: OptsModel = "full" - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters(type="Newton") + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "non_linear_maxiter": 100, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "info": False, + "linearize": False, + } + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._model = self.options.model - self._lin_solver = 
self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize = self._nonlin_solver.linearize - - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) - - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + return dct + + def __init__( + self, + b: BlockVector, + u: BlockVector, + *, + model: str = "full", + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + ): + super().__init__(b, u) + + assert model in ["full", "full_p", "linear"] + self._model = model + self._mass_ops = mass_ops + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._linearize = self._nonlin_solver["linearize"] + + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) + + self._Mrho = mass_ops # Projector self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - u = self.variables.u.spline.vector - b = self.variables.b.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un2 = u.space.zeros() self._tmp_un12 = u.space.zeros() @@ -4216,8 +4114,8 @@ def __call_newton(self, dt): print() print("Newton iteration in VariationalMagFieldEvolve") # Compute implicit approximation of s^{n+1} - un = self.variables.u.spline.vector - bn = self.variables.b.spline.vector + bn = self.feec_vars[0] + un = self.feec_vars[1] bn1 = bn.copy(out=self._tmp_bn1) # Initialize variable for Newton iteration @@ -4230,10 +4128,10 @@ def __call_newton(self, dt): un1 = un.copy(out=self._tmp_un1) un1 += self._tmp_un_diff mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - tol = self._nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 - for it in range(self._nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Newton iteration # half time step approximation bn12 = bn.copy(out=self._tmp_bn12) @@ -4294,7 +4192,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Derivative for Newton @@ -4316,18 +4214,20 @@ def __call_newton(self, dt): # Multiply by the mass matrix to get the momentum mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self._nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalMagFieldEvolve reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalMagFieldEvolve reached - not converged:\n {err = } \n {tol**2 = }", ) self._tmp_un_diff = un1 - un self._tmp_bn_diff = bn1 - bn - self.update_feec_variables(b=bn1, u=un1) + self.feec_vars_update(bn1, un1) def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and needed to compute the bracket term""" + from struphy.feec.variational_utilities import Hdiv0_transport_operator + self.curlPib = Hdiv0_transport_operator(self.derham) self.curlPibT = self.curlPib.T @@ -4376,13 +4276,15 @@ def _initialize_projectors_and_mass(self): self._Jacobian[1, 0] = self._dt2_curlPib self._Jacobian[1, 1] = self._I2 + from struphy.linear_algebra.schur_solver import SchurSolverFull + self._inv_Jacobian = SchurSolverFull( self._Jacobian, - self.options.solver, + self._lin_solver["type"][0], pc=self._Mrho.inv, - tol=self._lin_solver.tol, - 
maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) @@ -4400,6 +4302,8 @@ def _update_Pib(self, b): self.curlPibT.update_coeffs(b) def _create_Pib0(self): + from struphy.feec.variational_utilities import Hdiv0_transport_operator + self.curlPib0 = Hdiv0_transport_operator(self.derham) self.curlPibT0 = self.curlPib0.T @@ -4485,130 +4389,72 @@ class VariationalPBEvolve(Propagator): and :math:`\mathcal{U}^v` is :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperators`. """ - class Variables: - def __init__(self): - self._p: FEECVariable = None - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def p(self) -> FEECVariable: - return self._p - - @p.setter - def p(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._p = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal["full_p", "linear", "deltaf"] - # propagator options - model: OptsModel = "full_p" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - div_u: FEECVariable = None - u2: FEECVariable = None - pt3: FEECVariable = None - bt2: FEECVariable = None - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "non_linear_maxiter": 100, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "type": ["Picard"], + "info": False, + "linearize": False, + } + dct["physics"] = {"gamma": 5 / 3} - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._model = self.options.model - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize = self.options.nonlin_solver.linearize - self._gamma = self.options.gamma - - if self.options.div_u is None: - self._divu = None - else: - self._divu = self.options.div_u.spline.vector + if default: + dct = descend_options_dict(dct, []) - if self.options.u2 is None: - self._u2 = None - else: - self._u2 = self.options.u2.spline.vector + return dct - if 
self.options.pt3 is None: - self._pt3 = None - else: - self._pt3 = self.options.pt3.spline.vector + def __init__( + self, + p: StencilVector, + b: BlockVector, + u: BlockVector, + *, + model: str = "full_p", + gamma: float = options()["physics"]["gamma"], + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + div_u: StencilVector | None = None, + u2: BlockVector | None = None, + pt3: StencilVector | None = None, + bt2: BlockVector | None = None, + ): + super().__init__(p, b, u) - if self.options.bt2 is None: - self._bt2 = None - else: - self._bt2 = self.options.bt2.spline.vector + assert model in ["full_p", "linear", "deltaf"] + self._model = model + self._mass_ops = mass_ops + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._linearize = self._nonlin_solver["linearize"] + self._gamma = gamma - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) + self._divu = div_u + self._u2 = u2 + self._pt3 = pt3 + self._bt2 = bt2 - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) + + self._Mrho = mass_ops # Projector self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - u = self.variables.u.spline.vector - p = self.variables.p.spline.vector - b = self.variables.b.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un2 = u.space.zeros() self._tmp_un12 = u.space.zeros() @@ -4642,7 +4488,7 @@ def allocate(self): self._extracted_b2 = self.derham.extraction_ops["2"].dot(self.projected_equil.b2) def __call__(self, dt): - if self._nonlin_solver.type == "Picard": + if self._nonlin_solver["type"] == "Picard": self.__call_picard(dt) else: raise ValueError("Only Picard solver is implemented for VariationalPBEvolve") @@ -4654,9 +4500,9 @@ def __call_picard(self, dt): print() print("Newton iteration in VariationalPBEvolve") - un = self.variables.u.spline.vector - pn = self.variables.p.spline.vector - bn = self.variables.b.spline.vector + pn = self.feec_vars[0] + bn = self.feec_vars[1] + un = self.feec_vars[2] self._update_Pib(bn) self._update_Projp(pn) @@ -4669,10 +4515,10 @@ def __call_picard(self, dt): un1 = un.copy(out=self._tmp_un1) un1 += self._tmp_un_diff mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - tol = self._nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 - for it in range(self._nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Picard iteration # half time step approximation @@ -4797,7 +4643,7 @@ def __call_picard(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Derivative for Newton @@ -4819,15 +4665,15 @@ def __call_picard(self, dt): # Multiply by the mass matrix to get the momentum mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self._nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalPBEvolve reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalPBEvolve reached - not converged:\n {err = } \n {tol**2 = }", ) self._tmp_un_diff = un1 - un self._tmp_bn_diff = bn1 - bn self._tmp_pn_diff = pn1 - pn - self.update_feec_variables(p=pn1, b=bn1, 
u=un1) + self.feec_vars_update(pn1, bn1, un1) self._transop_p.div.dot(un12, out=self._divu) self._transop_p._Uv.dot(un1, out=self._u2) @@ -4853,6 +4699,9 @@ def __call_picard(self, dt): def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and needed to compute the bracket term""" + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import Hdiv0_transport_operator, Pressure_transport_operator + self.curlPib = Hdiv0_transport_operator(self.derham) self.curlPibT = self.curlPib.T self._transop_p = Pressure_transport_operator(self.derham, self.domain, self.basis_ops.Uv, self._gamma) @@ -4868,7 +4717,7 @@ def _initialize_projectors_and_mass(self): grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) # Inverse mass matrix needed to compute the error self.pc_Mv = preconditioner.MassMatrixDiagonalPreconditioner( @@ -4940,11 +4789,11 @@ def _initialize_projectors_and_mass(self): self._inv_Jacobian = SchurSolverFull( self._Jacobian, - self.options.solver, + self._lin_solver["type"][0], pc=self._Mrho.inv, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) @@ -4963,6 +4812,8 @@ def _update_Pib(self, b): self.curlPibT.update_coeffs(b) def _create_Pib0(self): + from struphy.feec.variational_utilities import Hdiv0_transport_operator + self.curlPib0 = Hdiv0_transport_operator(self.derham) self.curlPibT0 = self.curlPib.T self.curlPib0.update_coeffs(self.projected_equil.b2) @@ -4975,6 +4826,7 @@ def _update_Projp(self, p): def _create_transop0(self): """Update the weights of the `BasisProjectionOperator`""" + from struphy.feec.variational_utilities import Pressure_transport_operator self._transop_p0 = Pressure_transport_operator(self.derham, self.domain, self.basis_ops.Uv, self._gamma) self._transop_p0T = self._transop_p0.T @@ -5083,130 +4935,72 @@ class VariationalQBEvolve(Propagator): and :math:`\mathcal{U}^v` is :class:`~struphy.feec.basis_projection_ops.BasisProjectionOperators`. 
""" - class Variables: - def __init__(self): - self._q: FEECVariable = None - self._u: FEECVariable = None - self._b: FEECVariable = None - - @property - def q(self) -> FEECVariable: - return self._q - - @q.setter - def q(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._q = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal["full_q", "linear_q", "deltaf_q"] - # propagator options - model: OptsModel = "full_q" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - div_u: FEECVariable = None - u2: FEECVariable = None - qt3: FEECVariable = None - bt2: FEECVariable = None - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "non_linear_maxiter": 100, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "type": ["Picard"], + "info": False, + "linearize": False, + } + dct["physics"] = {"gamma": 5 / 3} - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._model = self.options.model - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize = self.options.nonlin_solver.linearize - self._gamma = self.options.gamma - - if self.options.div_u is None: - self._divu = None - else: - self._divu = self.options.div_u.spline.vector + if default: + dct = descend_options_dict(dct, []) - if self.options.u2 is None: - self._u2 = None - else: - self._u2 = self.options.u2.spline.vector + return dct - if self.options.qt3 is None: - self._qt3 = None - else: - self._qt3 = self.options.qt3.spline.vector + def __init__( + self, + q: StencilVector, + b: BlockVector, + u: BlockVector, + *, + model: str = "full", + gamma: float = options()["physics"]["gamma"], + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + div_u: StencilVector | None = None, + u2: BlockVector | None = None, + qt3: StencilVector | None = None, + bt2: BlockVector | None = None, + ): + super().__init__(q, b, u) - if self.options.bt2 is None: - self._bt2 = 
None - else: - self._bt2 = self.options.bt2.spline.vector + assert model in ["full_q", "linear_q", "deltaf_q"] + self._model = model + self._mass_ops = mass_ops + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._linearize = self._nonlin_solver["linearize"] + self._gamma = gamma - self._info = self._nonlin_solver.info and (self.rank == 0) + self._divu = div_u + self._u2 = u2 + self._qt3 = qt3 + self._bt2 = bt2 - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + self._info = self._nonlin_solver["info"] and (self.rank == 0) + + self._Mrho = mass_ops # Projector self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - u = self.variables.u.spline.vector - q = self.variables.q.spline.vector - b = self.variables.b.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un12 = u.space.zeros() self._tmp_bn1 = b.space.zeros() @@ -5238,7 +5032,7 @@ def allocate(self): self._extracted_q3 = self.derham.extraction_ops["3"].dot(self.projected_equil.q3) def __call__(self, dt): - if self._nonlin_solver.type == "Picard": + if self._nonlin_solver["type"] == "Picard": self.__call_picard(dt) else: raise ValueError("Only Picard solver is implemented for VariationalQBEvolve") @@ -5250,9 +5044,9 @@ def __call_picard(self, dt): print() print("Newton iteration in VariationalQBEvolve") - un = self.variables.u.spline.vector - qn = self.variables.q.spline.vector - bn = self.variables.b.spline.vector + qn = self.feec_vars[0] + bn = self.feec_vars[1] + un = self.feec_vars[2] self._update_Pib(bn) self._update_Projq(qn) @@ -5265,10 +5059,10 @@ def __call_picard(self, dt): un1 = un.copy(out=self._tmp_un1) un1 += self._tmp_un_diff mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - tol = self._nonlin_solver.tol + tol = self._nonlin_solver["tol"] err = tol + 1 - for it in range(self._nonlin_solver.maxiter): + for it in range(self._nonlin_solver["maxiter"]): # Picard iteration # half time step approximation @@ -5386,7 +5180,7 @@ def __call_picard(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if err < tol**2 or xp.isnan(err): + if err < tol**2 or np.isnan(err): break # Derivative for Newton @@ -5410,15 +5204,15 @@ def __call_picard(self, dt): # Multiply by the mass matrix to get the momentum mn1 = self._Mrho.massop.dot(un1, out=self._tmp_mn1) - if it == self._nonlin_solver.maxiter - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalPBEvolve reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalQBEvolve reached - not converged:\n {err = } \n {tol**2 = }", ) self._tmp_un_diff = un1 - un self._tmp_bn_diff = bn1 - bn self._tmp_qn_diff = qn1 - qn - self.update_feec_variables(q=qn1, b=bn1, u=un1) + self.feec_vars_update(qn1, bn1, un1) self._transop_q.div.dot(un12, out=self._divu) self._transop_q._Uv.dot(un1, out=self._u2) @@ -5444,6 +5238,9 @@ def __call_picard(self, dt): def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and needed to compute the bracket term""" + from struphy.feec.projectors import L2Projector + from struphy.feec.variational_utilities import Hdiv0_transport_operator, Pressure_transport_operator + self.curlPib = Hdiv0_transport_operator(self.derham) self.curlPibT = self.curlPib.T self._transop_q = Pressure_transport_operator(self.derham, self.domain, self.basis_ops.Uv, 
self._gamma / 2.0) @@ -5459,7 +5256,7 @@ def _initialize_projectors_and_mass(self): grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) # Inverse mass matrix needed to compute the error self.pc_Mv = preconditioner.MassMatrixDiagonalPreconditioner( @@ -5550,11 +5347,11 @@ def _initialize_projectors_and_mass(self): self._inv_Jacobian = SchurSolverFull3( self._Jacobian, - self.options.solver, + self._lin_solver["type"][0], pc=self._Mrho.inv, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, - verbose=self._lin_solver.verbose, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], + verbose=self._lin_solver["verbose"], recycle=True, ) @@ -5573,6 +5370,8 @@ def _update_Pib(self, b): self.curlPibT.update_coeffs(b) def _create_Pib0(self): + from struphy.feec.variational_utilities import Hdiv0_transport_operator + self.curlPib0 = Hdiv0_transport_operator(self.derham) self.curlPibT0 = self.curlPib.T self.curlPib0.update_coeffs(self.projected_equil.b2) @@ -5585,6 +5384,7 @@ def _update_Projq(self, q): def _create_transop0(self): """Update the weights of the `BasisProjectionOperator`""" + from struphy.feec.variational_utilities import Pressure_transport_operator self._transop_q0 = Pressure_transport_operator(self.derham, self.domain, self.basis_ops.Uv, self._gamma / 2.0) self._transop_q0T = self._transop_q0.T @@ -5689,95 +5489,72 @@ class VariationalViscosity(Propagator): """ - class Variables: - def __init__(self): - self._s: FEECVariable = None - self._u: FEECVariable = None - - @property - def s(self) -> FEECVariable: - return self._s - - @s.setter - def s(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._s = new - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1vec" - self._u = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal["full", "full_p", "full_q", "linear_p", "linear_q", "deltaf_q"] - # propagator options - model: OptsModel = "full" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixDiagonalPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - rho: FEECVariable = None - pt3: FEECVariable = None - mu: float = 0.0 - mu_a: float = 0.0 - alpha: float = 0.0 - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters(type="Newton") + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = { + "tol": 1e-8, + "maxiter": 100, + "type": ["Newton"], + "info": False, + "fast": False, + } + dct["physics"] = { + "gamma": 1.66666666667, + "mu": 0.0, + "mu_a": 0.0, + "alpha": 0.0, + } + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - 
return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._model = self.options.model - self._gamma = self.options.gamma - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._mu_a = self.options.mu_a - self._alpha = self.options.alpha - self._mu = self.options.mu - self._rho = self.options.rho - self._pt3 = self.options.pt3 - - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) - - self._Mrho = self.mass_ops.WMM - self._Mrho.inv._options["pc"] = MassMatrixDiagonalPreconditioner(self._Mrho.massop) + return dct + + def __init__( + self, + s: StencilVector, + u: BlockVector, + *, + model: str = "full", + gamma: float = options()["physics"]["gamma"], + rho: StencilVector, + mu: float = options()["physics"]["mu"], + mu_a: float = options()["physics"]["mu_a"], + alpha: float = options()["physics"]["alpha"], + mass_ops: H1vecMassMatrix_density, + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + energy_evaluator: InternalEnergyEvaluator = None, + pt3: StencilVector | None = None, + ): + super().__init__(s, u) + + assert model in ["full", "full_p", "full_q", "linear_p", "linear_q", "deltaf_q"] + + self._model = model + self._gamma = gamma + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._mu_a = mu_a + self._alpha = alpha + self._mu = mu + self._rho = rho + self._pt3 = pt3 + self._energy_evaluator = energy_evaluator + + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) + + self._Mrho = mass_ops # Femfields for the projector self.sf = self.derham.create_spline_function("sf", "L2") @@ -5792,13 +5569,9 @@ def allocate(self): self.gu122f = self.derham.create_spline_function("gu122", "Hcurl") # Projector - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - u = self.variables.u.spline.vector - s = self.variables.s.spline.vector - self._tmp_un1 = u.space.zeros() self._tmp_un12 = u.space.zeros() self._tmp_sn1 = s.space.zeros() @@ -5815,7 +5588,7 @@ def allocate(self): self.tot_rhs = s.space.zeros() def __call__(self, dt): - if self._nonlin_solver.type == "Newton": + if self._nonlin_solver["type"] == "Newton": self.__call_newton(dt) else: raise ValueError( @@ -5825,11 +5598,10 @@ def __call__(self, dt): def __call_newton(self, dt): """Solve the non linear system for updating the variables using Newton iteration method""" # Compute dissipation implicitly - sn = self.variables.s.spline.vector - un = self.variables.u.spline.vector - + sn = self.feec_vars[0] + un = self.feec_vars[1] if self._mu < 1.0e-15 and self._mu_a < 1.0e-15 and self._alpha < 1.0e-15: - self.update_feec_variables(s=sn, u=un) + self.feec_vars_update(sn, un) return if self._info: @@ -5847,7 +5619,7 @@ def __call_newton(self, dt): print("information on the linear solver : ", self.inv_lop._info) if self._model == "linear_p" or (self._model == "linear_q" and self._nonlin_solver["fast"]): - self.update_feec_variables(s=sn, u=un1) + self.feec_vars_update(sn, un1) return # Energy balance term @@ -5856,7 +5628,7 @@ def __call_newton(self, dt): # 2) 
Initial energy and linear form rho = self._rho if self._model in ["deltaf_q", "linear_q"]: - self.sf.vector = self._pt3.spline.vector + self.sf.vector = self._pt3 else: self.sf.vector = sn @@ -5912,7 +5684,7 @@ def __call_newton(self, dt): for it in range(self._nonlin_solver["maxiter"]): if self._model in ["deltaf_q", "linear_q"]: - self.sf1.vector = self._pt3.spline.vector + self.sf1.vector = self._pt3 else: self.sf1.vector = sn1 @@ -5964,7 +5736,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if (err < tol**2 and it > 0) or xp.isnan(err): + if (err < tol**2 and it > 0) or np.isnan(err): # force at least one iteration break @@ -6002,16 +5774,18 @@ def __call_newton(self, dt): else: sn1 += incr - if it == self._nonlin_solver["maxiter"] - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalViscosity reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalViscosity reached - not converged:\n {err = } \n {tol**2 = }", ) - self.update_feec_variables(s=sn1, u=un1) + self.feec_vars_update(sn1, un1) def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` and needed to compute the bracket term""" + from struphy.feec.projectors import L2Projector + Xv = getattr(self.basis_ops, "Xv") Pcoord0 = CoordinateProjector( 0, @@ -6046,12 +5820,12 @@ def _initialize_projectors_and_mass(self): self.M_de_ds = self.mass_ops.create_weighted_mass("L2", "L2") - if self.options.precond is None: + if self._lin_solver["type"][1] is None: self.pc_jac = None else: pc_class = getattr( preconditioner, - self.options.precond, + self._lin_solver["type"][1], ) self.pc_jac = pc_class(self.M_de_ds) @@ -6059,8 +5833,8 @@ def _initialize_projectors_and_mass(self): self.M_de_ds, "pcg", pc=self.pc_jac, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], verbose=False, recycle=True, ) @@ -6101,8 +5875,8 @@ def _initialize_projectors_and_mass(self): self.l_op, "pcg", pc=self._Mrho.inv, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], verbose=False, recycle=True, ) @@ -6138,35 +5912,35 @@ def _initialize_projectors_and_mass(self): grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._guf0_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._guf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._guf2_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf0_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf2_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] - self._guf120_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._guf121_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._guf122_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf120_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf121_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._guf122_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] - self._uf1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._uf12_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + 
self._uf1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._uf12_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] - self._gu_sq_values = xp.zeros(grid_shape, dtype=float) - self._u_sq_values = xp.zeros(grid_shape, dtype=float) - self._gu_init_values = xp.zeros(grid_shape, dtype=float) + self._gu_sq_values = np.zeros(grid_shape, dtype=float) + self._u_sq_values = np.zeros(grid_shape, dtype=float) + self._gu_init_values = np.zeros(grid_shape, dtype=float) - self._sf_values = xp.zeros(grid_shape, dtype=float) - self._sf1_values = xp.zeros(grid_shape, dtype=float) - self._rhof_values = xp.zeros(grid_shape, dtype=float) + self._sf_values = np.zeros(grid_shape, dtype=float) + self._sf1_values = np.zeros(grid_shape, dtype=float) + self._rhof_values = np.zeros(grid_shape, dtype=float) - self._e_n1 = xp.zeros(grid_shape, dtype=float) - self._e_n = xp.zeros(grid_shape, dtype=float) + self._e_n1 = np.zeros(grid_shape, dtype=float) + self._e_n = np.zeros(grid_shape, dtype=float) - self._de_s1_values = xp.zeros(grid_shape, dtype=float) + self._de_s1_values = np.zeros(grid_shape, dtype=float) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) gam = self._gamma if self._model == "full": - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6174,7 +5948,7 @@ def _initialize_projectors_and_mass(self): ) self._mass_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6207,7 +5981,7 @@ def _initialize_projectors_and_mass(self): self.pc_jac.update_mass_operator(self.M_de_ds) elif self._model in ["full_q", "linear_q", "deltaf_q"]: - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6215,7 +5989,7 @@ def _initialize_projectors_and_mass(self): ) self._mass_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6223,7 +5997,7 @@ def _initialize_projectors_and_mass(self): ) self._energy_metric = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6289,7 +6063,7 @@ def _update_artificial_viscosity(self, un, dt): gu_sq_v += gu1_v[i] gu_sq_v += gu2_v[i] - xp.sqrt(gu_sq_v, out=gu_sq_v) + np.sqrt(gu_sq_v, out=gu_sq_v) gu_sq_v *= dt * self._mu_a # /2 @@ -6447,92 +6221,63 @@ class VariationalResistivity(Propagator): """ - class Variables: - def __init__(self): - self._s: FEECVariable = None - self._b: FEECVariable = None - - @property - def s(self) -> FEECVariable: - return self._s - - @s.setter - def s(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._s = new - - @property - def b(self) -> FEECVariable: - return self._b - - @b.setter - def b(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._b = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsModel = Literal["full", "full_p", "full_q", "linear_p", "linear_q", "deltaf_q"] - # propagator options - model: OptsModel = "full" - gamma: float = 5.0 / 3.0 - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixDiagonalPreconditioner" - solver_params: SolverParameters = None - nonlin_solver: NonlinearSolverParameters = None - linearize_current: bool = False - rho: FEECVariable = None - pt3: FEECVariable = None - eta: float = 0.0 - eta_a: 
float = 0.0 - - def __post_init__(self): - # checks - check_option(self.model, self.OptsModel) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.nonlin_solver is None: - self.nonlin_solver = NonlinearSolverParameters(type="Newton") + @staticmethod + def options(default=False): + dct = {} + dct["lin_solver"] = { + "tol": 1e-12, + "maxiter": 500, + "type": [ + ("pcg", "MassMatrixDiagonalPreconditioner"), + ("cg", None), + ], + "verbose": False, + } + dct["nonlin_solver"] = {"tol": 1e-8, "maxiter": 100, "type": ["Newton"], "info": False, "fast": False} + dct["physics"] = { + "eta": 0.0, + "eta_a": 0.0, + "gamma": 5 / 3, + } + dct["linearize_current"] = False + + if default: + dct = descend_options_dict(dct, []) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._model = self.options.model - self._gamma = self.options.gamma - self._eta = self.options.eta - self._eta_a = self.options.eta_a - self._lin_solver = self.options.solver_params - self._nonlin_solver = self.options.nonlin_solver - self._linearize_current = self.options.linearize_current - self._rho = self.options.rho - self._pt3 = self.options.pt3 - - self._info = self._nonlin_solver.info and (MPI.COMM_WORLD.Get_rank() == 0) + return dct + + def __init__( + self, + s: StencilVector, + b: BlockVector, + *, + model: str = "full", + gamma: float = options()["physics"]["gamma"], + rho: StencilVector, + eta: float = options()["physics"]["eta"], + eta_a: float = options()["physics"]["eta_a"], + lin_solver: dict = options(default=True)["lin_solver"], + nonlin_solver: dict = options(default=True)["nonlin_solver"], + linearize_current: bool = options(default=True)["linearize_current"], + energy_evaluator: InternalEnergyEvaluator | None = None, + pt3: StencilVector | None = None, + ): + super().__init__(s, b) + + assert model in ["full", "full_p", "full_q", "linear_p", "delta_p", "linear_q", "deltaf_q"] + + self._energy_evaluator = energy_evaluator + self._model = model + self._gamma = gamma + self._eta = eta + self._eta_a = eta_a + self._lin_solver = lin_solver + self._nonlin_solver = nonlin_solver + self._rho = rho + self._linearize_current = linearize_current + self._pt3 = pt3 + + self._info = self._nonlin_solver["info"] and (MPI.COMM_WORLD.Get_rank() == 0) # Femfields for the projector self.rhof = self.derham.create_spline_function("rhof", "L2") @@ -6544,13 +6289,9 @@ def allocate(self): self.cbf12 = self.derham.create_spline_function("cBf", "Hcurl") # Projector - self._energy_evaluator = InternalEnergyEvaluator(self.derham, self._gamma) self._initialize_projectors_and_mass() # bunch of temporaries to avoid allocating in the loop - s = self.variables.s.spline.vector - b = self.variables.b.spline.vector - self._tmp_bn1 = b.space.zeros() self._tmp_bn12 = b.space.zeros() self._tmp_sn1 = s.space.zeros() @@ -6567,7 +6308,7 @@ def allocate(self): ) def __call__(self, dt): - if self._nonlin_solver.type == "Newton": + if self._nonlin_solver["type"] == "Newton": self.__call_newton(dt) else: raise ValueError( @@ -6577,11
+6318,10 @@ def __call__(self, dt): def __call_newton(self, dt): """Solve the nonlinear system for updating the variables using the Newton iteration method""" # Compute dissipation implicitly - sn = self.variables.s.spline.vector - bn = self.variables.b.spline.vector - + sn = self.feec_vars[0] + bn = self.feec_vars[1] if self._eta < 1.0e-15 and self._eta_a < 1.0e-15: - self.update_feec_variables(s=sn, b=bn) + self.feec_vars_update(sn, bn) return if self._info: @@ -6608,17 +6348,17 @@ def __call_newton(self, dt): print("information on the linear solver : ", self.inv_lop._info) if self._model == "linear_p" or (self._model == "linear_q" and self._nonlin_solver["fast"]): - self.update_feec_variables(s=sn, b=bn1) + self.feec_vars_update(sn, bn1) return # Energy balance term # 1) Pointwise energy change energy_change = self._get_energy_change(bn, bn1, total_resistivity) # 2) Initial energy and linear form - rho = self._rho.spline.vector + rho = self._rho self.rhof.vector = rho if self._model in ["deltaf_q", "linear_q"]: - self.sf.vector = self._pt3.spline.vector + self.sf.vector = self._pt3 else: self.sf.vector = sn @@ -6678,7 +6418,7 @@ def __call_newton(self, dt): for it in range(self._nonlin_solver["maxiter"]): if self._model in ["deltaf_q", "linear_q"]: - self.sf1.vector = self._pt3.spline.vector + self.sf1.vector = self._pt3 else: self.sf1.vector = sn1 @@ -6730,7 +6470,7 @@ def __call_newton(self, dt): if self._info: print("iteration : ", it, " error : ", err) - if (err < tol**2 and it > 0) or xp.isnan(err): + if (err < tol**2 and it > 0) or np.isnan(err): break if self._model == "full": @@ -6766,12 +6506,12 @@ def __call_newton(self, dt): else: sn1 += incr - if it == self._nonlin_solver["maxiter"] - 1 or xp.isnan(err): + if it == self._nonlin_solver["maxiter"] - 1 or np.isnan(err): print( - f"!!!Warning: Maximum iteration in VariationalResistivity reached - not converged:\n {err =} \n {tol**2 =}", + f"!!!Warning: Maximum iteration in VariationalResistivity reached - not converged:\n {err = } \n {tol**2 = }", ) - self.update_feec_variables(s=sn1, b=bn1) + self.feec_vars_update(sn1, bn1) # if self._pt3 is not None: # bn12 = bn.copy(out=self._tmp_bn12) @@ -6849,7 +6589,7 @@ def __call_newton(self, dt): # if self._info: # print("iteration : ", it, " error : ", err) - # if (err < tol**2 and it > 0) or xp.isnan(err): + # if (err < tol**2 and it > 0) or np.isnan(err): # break # incr = self.inv_jac.dot(self.tot_rhs, out=self._tmp_sn_incr) @@ -6862,6 +6602,8 @@ def __call_newton(self, dt): def _initialize_projectors_and_mass(self): """Initialization of all the `BasisProjectionOperator` needed to compute the bracket term""" + from struphy.feec.projectors import L2Projector + pc_M1 = preconditioner.MassMatrixDiagonalPreconditioner( self.mass_ops.M1, ) @@ -6892,12 +6634,12 @@ def _initialize_projectors_and_mass(self): D = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.M1_cb = self.mass_ops.create_weighted_mass("Hcurl", "Hcurl", weights=[D, "sqrt_g"]) - if self.options.precond is None: + if self._lin_solver["type"][1] is None: self.pc_jac = None else: pc_class = getattr( preconditioner, - self.options.precond, + self._lin_solver["type"][1], ) self.pc_jac = pc_class(self.M_de_ds) @@ -6905,8 +6647,8 @@ def _initialize_projectors_and_mass(self): self.M_de_ds, "pcg", pc=self.pc_jac, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], verbose=False, recycle=True, ) @@ -6922,12 +6664,12 @@ def _initialize_projectors_and_mass(self):
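The `inverse(self.M_de_ds, "pcg", pc=self.pc_jac, tol=self._lin_solver["tol"], maxiter=self._lin_solver["maxiter"], recycle=True)` calls above wrap a weighted mass matrix in a preconditioned conjugate-gradient solve. For orientation, the following is a toy dense-NumPy sketch of what "tol", "maxiter" and a diagonal (Jacobi) preconditioner in the spirit of `MassMatrixDiagonalPreconditioner` amount to; it assumes a symmetric positive-definite matrix and is not the psydac `inverse` API:

import numpy as np

def pcg(A, b, apply_pc=None, tol=1e-12, maxiter=500):
    """Minimal preconditioned CG for SPD A; apply_pc(r) approximates A^{-1} r."""
    x = np.zeros_like(b)
    r = b - A @ x
    z = apply_pc(r) if apply_pc else r.copy()
    p = z.copy()
    rz = r @ z
    for it in range(maxiter):
        Ap = A @ p
        alpha = rz / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        if np.linalg.norm(r) < tol:  # plays the role of lin_solver["tol"]
            break
        z = apply_pc(r) if apply_pc else r
        rz_new = r @ z
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x, it

# usage: Jacobi preconditioning, i.e. scaling by the inverse matrix diagonal
A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x, its = pcg(A, b, apply_pc=lambda r: r / np.diag(A))
assert np.allclose(A @ x, b)

In the actual calls, `recycle=True` additionally lets the solver start from the previous solution rather than from zero across time steps, a detail this sketch omits.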
self.r_op = M2 # - self._scaled_stiffness self.l_op = M2 + self._scaled_stiffness + self.phy_cb_stiffness - if self.options.precond is None: + if self._lin_solver["type"][1] is None: self.pc = None else: pc_class = getattr( preconditioner, - self.options.precond, + self._lin_solver["type"][1], ) self.pc = pc_class(M2) @@ -6935,8 +6677,8 @@ def _initialize_projectors_and_mass(self): self.l_op, "pcg", pc=self.pc, - tol=self._lin_solver.tol, - maxiter=self._lin_solver.maxiter, + tol=self._lin_solver["tol"], + maxiter=self._lin_solver["maxiter"], verbose=False, recycle=True, ) @@ -6962,26 +6704,26 @@ def _initialize_projectors_and_mass(self): grid_shape = tuple([len(loc_grid) for loc_grid in integration_grid]) - self._cb12_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] - self._cb1_values = [xp.zeros(grid_shape, dtype=float) for i in range(3)] + self._cb12_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] + self._cb1_values = [np.zeros(grid_shape, dtype=float) for i in range(3)] - self._cb_sq_values = xp.zeros(grid_shape, dtype=float) - self._cb_sq_values_init = xp.zeros(grid_shape, dtype=float) + self._cb_sq_values = np.zeros(grid_shape, dtype=float) + self._cb_sq_values_init = np.zeros(grid_shape, dtype=float) - self._sf_values = xp.zeros(grid_shape, dtype=float) - self._sf1_values = xp.zeros(grid_shape, dtype=float) - self._rhof_values = xp.zeros(grid_shape, dtype=float) + self._sf_values = np.zeros(grid_shape, dtype=float) + self._sf1_values = np.zeros(grid_shape, dtype=float) + self._rhof_values = np.zeros(grid_shape, dtype=float) - self._e_n1 = xp.zeros(grid_shape, dtype=float) - self._e_n = xp.zeros(grid_shape, dtype=float) + self._e_n1 = np.zeros(grid_shape, dtype=float) + self._e_n = np.zeros(grid_shape, dtype=float) - self._de_s1_values = xp.zeros(grid_shape, dtype=float) + self._de_s1_values = np.zeros(grid_shape, dtype=float) - self._tmp_int_grid = xp.zeros(grid_shape, dtype=float) + self._tmp_int_grid = np.zeros(grid_shape, dtype=float) gam = self._gamma if self._model == "full": - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -6989,7 +6731,7 @@ def _initialize_projectors_and_mass(self): ) self._mass_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -7022,7 +6764,7 @@ def _initialize_projectors_and_mass(self): self.pc_jac.update_mass_operator(self.M_de_ds) elif self._model in ["full_q", "linear_q", "deltaf_q"]: - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -7030,7 +6772,7 @@ def _initialize_projectors_and_mass(self): ) self._mass_metric_term = deepcopy(metric) - metric = xp.power( + metric = np.power( self.domain.jacobian_det( *integration_grid, ), @@ -7077,7 +6819,7 @@ def _update_artificial_resistivity(self, bn, dt): for j in range(3): cb_sq_v += cb_v[i] * self._sq_term_metric_no_jac[i, j] * cb_v[j] - xp.sqrt(cb_sq_v, out=cb_sq_v) + np.sqrt(cb_sq_v, out=cb_sq_v) cb_sq_v *= dt * self._eta_a @@ -7175,74 +6917,48 @@ class TimeDependentSource(Propagator): * :math:`h(\omega t) = \sin(\omega t)` """ - class Variables: - def __init__(self): - self._source: FEECVariable = None - - @property - def source(self) -> FEECVariable: - return self._source - - @source.setter - def source(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1" - self._source = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - 
OptsTimeSource = Literal["cos", "sin"] - # propagator options - omega: float = 2.0 * xp.pi - hfun: OptsTimeSource = "cos" + @staticmethod + def options(default=False): + dct = {} + dct["omega"] = 1.0 + dct["hfun"] = ["cos", "sin"] + if default: + dct = descend_options_dict(dct, []) + return dct - def __post_init__(self): - # checks - check_option(self.hfun, self.OptsTimeSource) + def __init__( + self, + c: StencilVector, + *, + omega: float = options()["omega"], + hfun: str = options(default=True)["hfun"], + ): + super().__init__(c) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - if self.options.hfun == "cos": + if hfun == "cos": def hfun(t): - return xp.cos(self.options.omega * t) - elif self.options.hfun == "sin": + return np.cos(omega * t) + elif hfun == "sin": def hfun(t): - return xp.sin(self.options.omega * t) + return np.sin(omega * t) else: - raise NotImplementedError(f"{self.options.hfun =} not implemented.") + raise NotImplementedError(f"{hfun = } not implemented.") self._hfun = hfun - self._c0 = self.variables.source.spline.vector.copy() - @profile def __call__(self, dt): + if self.time_state[0] == 0.0: + # initialize the source coeffs at the first call + self._c0 = self.feec_vars[0].copy() + # new coeffs cn1 = self._c0 * self._hfun(self.time_state[0]) # write new coeffs into self.feec_vars - # max_dc = self.feec_vars_update(cn1) - self.update_feec_variables(source=cn1) + self.feec_vars_update(cn1) class AdiabaticPhi(Propagator): @@ -7480,100 +7196,63 @@ class HasegawaWakatani(Propagator): Solver parameters for M0 inversion.
""" - class Variables: - def __init__(self): - self._n: FEECVariable = None - self._omega: FEECVariable = None - - @property - def n(self) -> FEECVariable: - return self._n - - @n.setter - def n(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1" - self._n = new - - @property - def omega(self) -> FEECVariable: - return self._omega - - @omega.setter - def omega(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "H1" - self._omega = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsCfun = Literal["const"] - # propagator options - phi: FEECVariable = None - c_fun: OptsCfun = "const" - kappa: float = 1.0 - nu: float = 0.01 - butcher: ButcherTableau = None - solver: OptsSymmSolver = "pcg" - precond: OptsMassPrecond = "MassMatrixPreconditioner" - solver_params: SolverParameters = None - - def __post_init__(self): - # checks - check_option(self.c_fun, self.OptsCfun) - check_option(self.solver, OptsSymmSolver) - check_option(self.precond, OptsMassPrecond) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() - - if self.butcher is None: - self.butcher = ButcherTableau() + @staticmethod + def options(default=False): + dct = {} + dct["c_fun"] = ["const"] + dct["kappa"] = 1.0 + dct["nu"] = 0.01 + dct["algo"] = ButcherTableau.available_methods() + dct["M0_solver"] = { + "type": [ + ("pcg", "MassMatrixPreconditioner"), + ("cg", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + if default: + dct = descend_options_dict(dct, []) + return dct + + def __init__( + self, + n0: StencilVector, + omega0: StencilVector, + *, + phi: SplineFunction = None, + c_fun: str = options(default=True)["c_fun"], + kappa: float = options(default=True)["kappa"], + nu: float = options(default=True)["nu"], + algo: str = options(default=True)["algo"], + M0_solver: dict = options(default=True)["M0_solver"], + ): + super().__init__(n0, omega0) - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): # default phi - if self.options.phi is None: - self.options.phi = FEECVariable(space="H1") - self.options.phi.allocate(derham=self.derham, domain=self.domain) - - self._phi = self.options.phi.spline - self._phi.vector[:] = 1.0 - self._phi.vector.update_ghost_regions() + if phi is None: + self._phi = self.derham.create_spline_function("phi", "H1") + self._phi.vector[:] = 1.0 + self._phi.vector.update_ghost_regions() + else: + self._phi = phi # default c-function - if self.options.c_fun == "const": + if c_fun == "const": c_fun = lambda e1, e2, e3: 0.0 + 0.0 * e1 else: - raise NotImplementedError(f"{self.options.c_fun =} is not available.") + raise NotImplementedError(f"{c_fun = } is not available.") # expose equation parameters - self._kappa = self.options.kappa - self._nu = self.options.nu + self._kappa = kappa + self._nu = nu # get quadrature grid of V0 pts = [grid.flatten() for grid in self.derham.quad_grid_pts["0"]] - mesh_pts = xp.meshgrid(*pts, indexing="ij") + mesh_pts = np.meshgrid(*pts, indexing="ij") # evaluate c(x, y) and metric coeff 
at local quadrature grid and multiply self._weights = c_fun(*mesh_pts) @@ -7606,13 +7285,13 @@ def allocate(self): for m in range(3): self._M1hw_weights += [[None, None, None]] - self._phi_5d = xp.zeros((*self._phi_at_pts.shape, 3, 3), dtype=float) - self._tmp_5d = xp.zeros((*self._phi_at_pts.shape, 3, 3), dtype=float) - self._tmp_5dT = xp.zeros((3, 3, *self._phi_at_pts.shape), dtype=float) + self._phi_5d = np.zeros((*self._phi_at_pts.shape, 3, 3), dtype=float) + self._tmp_5d = np.zeros((*self._phi_at_pts.shape, 3, 3), dtype=float) + self._tmp_5dT = np.zeros((3, 3, *self._phi_at_pts.shape), dtype=float) self._phi_5d[:, :, :, 0, 1] = self._phi_at_pts * self._jac_det self._phi_5d[:, :, :, 1, 0] = -self._phi_at_pts * self._jac_det self._tmp_5d[:] = self._jac_inv @ self._phi_5d @ self._jac_invT - self._tmp_5dT[:] = xp.transpose(self._tmp_5d, axes=(3, 4, 0, 1, 2)) + self._tmp_5dT[:] = np.transpose(self._tmp_5d, axes=(3, 4, 0, 1, 2)) self._M1hw_weights[0][1] = self._tmp_5dT[0, 1, :, :, :] self._M1hw_weights[1][0] = self._tmp_5dT[1, 0, :, :, :] @@ -7627,24 +7306,16 @@ def allocate(self): ) # inverse M0 mass matrix - solver = self.options.solver - if self.options.precond is None: + solver = M0_solver["type"][0] + if M0_solver["type"][1] is None: pc = None else: - pc_class = getattr(preconditioner, self.options.precond) + pc_class = getattr(preconditioner, M0_solver["type"][1]) pc = pc_class(self.mass_ops.M0) - # solver_params = deepcopy(M0_solver) # need a copy to pop, otherwise testing fails - # solver_params.pop("type") - self._info = self.options.solver_params.info - M0_inv = inverse( - M0, - solver, - pc=pc, - tol=self.options.solver_params.tol, - maxiter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, - recycle=self.options.solver_params.recycle, - ) + solver_params = deepcopy(M0_solver) # need a copy to pop, otherwise testing fails + solver_params.pop("type") + self._info = solver_params.pop("info") + M0_inv = inverse(M0, solver, pc=pc, **solver_params) # basis projection operator df_12 = lambda e1, e2, e3: self.domain.jacobian_inv(e1, e2, e3)[0, 1, :, :, :] @@ -7662,9 +7333,6 @@ def allocate(self): # print(f"{self._BPO._dof_mat.blocks = }") # pre-allocated helper arrays - n0 = self.variables.n.spline.vector - omega0 = self.variables.omega.spline.vector - self._tmp1 = n0.space.zeros() tmp2 = n0.space.zeros() self._tmp3 = n0.space.zeros() @@ -7672,11 +7340,11 @@ def allocate(self): tmp5 = n0.space.zeros() # rhs-callables for explicit ode solve - terms1_n = -M0c + grad.T @ self._M1hw @ grad - self.options.nu * grad.T @ M1 @ grad + terms1_n = -M0c + grad.T @ self._M1hw @ grad - nu * grad.T @ M1 @ grad terms1_phi = M0c - terms1_phi_strong = -self.options.kappa * self._BPO @ grad + terms1_phi_strong = -kappa * self._BPO @ grad - terms2_omega = grad.T @ self._M1hw @ grad - self.options.nu * grad.T @ M1 @ grad + terms2_omega = grad.T @ self._M1hw @ grad - nu * grad.T @ M1 @ grad terms2_n = -M0c terms2_phi = M0c @@ -7704,7 +7372,7 @@ def f2(t, n, omega, out=out2): return out vector_field = {n0: f1, omega0: f2} - self._ode_solver = ODEsolverFEEC(vector_field, butcher=self.options.butcher) + self._ode_solver = ODEsolverFEEC(vector_field, algo=algo) def __call__(self, dt): # update time-dependent mass operator @@ -7713,7 +7381,7 @@ def __call__(self, dt): self._phi_5d[:, :, :, 0, 1] = self._phi_at_pts * self._jac_det self._phi_5d[:, :, :, 1, 0] = -self._phi_at_pts * self._jac_det self._tmp_5d[:] = self._jac_inv @ self._phi_5d @ self._jac_invT - self._tmp_5dT[:] = 
xp.transpose(self._tmp_5d, axes=(3, 4, 0, 1, 2)) + self._tmp_5dT[:] = np.transpose(self._tmp_5d, axes=(3, 4, 0, 1, 2)) self._M1hw_weights[0][1] = self._tmp_5dT[0, 1, :, :, :] self._M1hw_weights[1][0] = self._tmp_5dT[1, 0, :, :, :] @@ -7742,122 +7410,91 @@ class TwoFluidQuasiNeutralFull(Propagator): :ref:`time_discret`: fully implicit. """ - class Variables: - def __init__(self): - self._u: FEECVariable = None - self._ue: FEECVariable = None - self._phi: FEECVariable = None - - @property - def u(self) -> FEECVariable: - return self._u - - @u.setter - def u(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._u = new - - @property - def ue(self) -> FEECVariable: - return self._ue - - @ue.setter - def ue(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "Hdiv" - self._ue = new - - @property - def phi(self) -> FEECVariable: - return self._phi - - @phi.setter - def phi(self, new): - assert isinstance(new, FEECVariable) - assert new.space == "L2" - self._phi = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsDimension = Literal["1D", "2D", "Restelli", "Tokamak"] - # propagator options - nu: float = 1.0 - nu_e: float = 0.01 - eps_norm: float = 1.0 - solver: OptsGenSolver = "GMRES" - solver_params: SolverParameters = None - a: float = 1.0 - R0: float = 1.0 - B0: float = 10.0 - Bp: float = 12.0 - alpha: float = 0.1 - beta: float = 1.0 - stab_sigma: float = 1e-5 - variant: OptsSaddlePointSolver = "Uzawa" - method_to_solve: OptsDirectSolver = "DirectNPInverse" - preconditioner: bool = False - spectralanalysis: bool = False - lifting: bool = False - dimension: OptsDimension = "2D" - D1_dt: float = 1e-3 - - def __post_init__(self): - # checks - check_option(self.solver, OptsGenSolver) - check_option(self.variant, OptsSaddlePointSolver) - check_option(self.method_to_solve, OptsDirectSolver) - check_option(self.dimension, self.OptsDimension) - - # defaults - if self.solver_params is None: - self.solver_params = SolverParameters() + @staticmethod + def options(default=False): + dct = {} + dct["solver"] = { + "type": [ + ("gmres", None), + ], + "tol": 1.0e-8, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": True, + } + dct["nu"] = 1.0 + dct["nu_e"] = 0.01 + dct["override_eq_params"] = [False, {"epsilon": 1.0}] + dct["eps_norm"] = 1.0 + dct["a"] = 1.0 + dct["R0"] = 1.0 + dct["B0"] = 10.0 + dct["Bp"] = 12.5 + dct["alpha"] = 0.1 + dct["beta"] = 1.0 + dct["stab_sigma"] = 0.00001 + dct["variant"] = "GMRES" + dct["method_to_solve"] = "DirectNPInverse" + dct["preconditioner"] = False + dct["spectralanalysis"] = False + dct["lifting"] = False + dct["dimension"] = "2D" + dct["1D_dt"] = 0.001 + if default: + dct = descend_options_dict(dct, []) + + return dct - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._info = self.options.solver_params.info + def __init__( + self, + u: BlockVector, + ue: BlockVector, + phi: BlockVector, + *, + nu: float = options(default=True)["nu"], + nu_e: float = options(default=True)["nu_e"], + eps_norm: float = options(default=True)["eps_norm"], 
+ solver: dict = options(default=True)["solver"], + a: float = options(default=True)["a"], + R0: float = options(default=True)["R0"], + B0: float = options(default=True)["B0"], + Bp: float = options(default=True)["Bp"], + alpha: float = options(default=True)["alpha"], + beta: float = options(default=True)["beta"], + stab_sigma: float = options(default=True)["stab_sigma"], + variant: str = options(default=True)["variant"], + method_to_solve: str = options(default=True)["method_to_solve"], + preconditioner: bool = options(default=True)["preconditioner"], + spectralanalysis: bool = options(default=True)["spectralanalysis"], + lifting: bool = options(default=False)["lifting"], + dimension: str = options(default=True)["dimension"], + D1_dt: float = options(default=True)["1D_dt"], + ): + super().__init__(u, ue, phi) + + self._info = solver["info"] if self.derham.comm is not None: self._rank = self.derham.comm.Get_rank() else: self._rank = 0 - self._nu = self.options.nu - self._nu_e = self.options.nu_e - self._eps_norm = self.options.eps_norm - self._a = self.options.a - self._R0 = self.options.R0 - self._B0 = self.options.B0 - self._Bp = self.options.Bp - self._alpha = self.options.alpha - self._beta = self.options.beta - self._stab_sigma = self.options.stab_sigma - self._variant = self.options.variant - self._method_to_solve = self.options.method_to_solve - self._preconditioner = self.options.preconditioner - self._dimension = self.options.dimension - self._spectralanalysis = self.options.spectralanalysis - self._lifting = self.options.lifting - - solver_params = self.options.solver_params + self._nu = nu + self._nu_e = nu_e + self._eps_norm = eps_norm + self._a = a + self._R0 = R0 + self._B0 = B0 + self._Bp = Bp + self._alpha = alpha + self._beta = beta + self._stab_sigma = stab_sigma + self._variant = variant + self._method_to_solve = method_to_solve + self._preconditioner = preconditioner + self._dimension = dimension + self._spectralanalysis = spectralanalysis + self._lifting = lifting # Lifting for nontrivial boundary conditions # derham had boundary conditions in eta1 direction, the following is in space Hdiv_0 @@ -7873,13 +7510,13 @@ def allocate(self): self._mass_opsv0 = WeightedMassOperators( self.derhamv0, self.domain, - verbose=solver_params.verbose, + verbose=solver["verbose"], eq_mhd=self.mass_ops.weights["eq_mhd"], ) self._basis_opsv0 = BasisProjectionOperators( self.derhamv0, self.domain, - verbose=solver_params.verbose, + verbose=solver["verbose"], eq_mhd=self.basis_ops.weights["eq_mhd"], ) else: @@ -7908,7 +7545,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, ) _funy = getattr(callables, "ManufacturedSolutionForceterm")( species="Ions", @@ -7918,7 +7555,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, ) _funelectronsx = getattr(callables, "ManufacturedSolutionForceterm")( species="Electrons", @@ -7928,7 +7565,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, ) _funelectronsy = getattr(callables, "ManufacturedSolutionForceterm")( species="Electrons", @@ -7938,7 +7575,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, ) # get callable(s) for specified init type @@ -7947,32 +7584,16 @@ def allocate(self): # pullback callable funx = 
TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) funy = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) fun_electronsx = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) fun_electronsy = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) l2_proj = L2Projector(space_id="Hdiv", mass_ops=self.mass_ops) self._F1 = l2_proj([funx, funy, _forceterm_logical]) @@ -8007,46 +7628,22 @@ def allocate(self): # pullback callable fun_pb_1 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) fun_pb_2 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) fun_pb_3 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=2, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=2, domain=self.domain ) fun_electrons_pb_1 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) fun_electrons_pb_2 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) fun_electrons_pb_3 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=2, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=2, domain=self.domain ) if self._lifting: l2_proj = L2Projector(space_id="Hdiv", mass_ops=self._mass_opsv0) @@ -8069,7 +7666,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ -8083,7 +7680,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ -8097,7 +7694,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ -8111,7 +7708,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ -8125,7 +7722,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ 
-8139,7 +7736,7 @@ def allocate(self): dimension=self._dimension, stab_sigma=self._stab_sigma, eps=self._eps_norm, - dt=self.options.D1_dt, + dt=D1_dt, a=self._a, Bp=self._Bp, alpha=self._alpha, @@ -8152,46 +7749,22 @@ def allocate(self): # pullback callable fun_pb_1 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) fun_pb_2 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) fun_pb_3 = TransformedPformComponent( - forceterm_class, - given_in_basis="physical", - out_form="2", - comp=2, - domain=self.domain, + forceterm_class, fun_basis="physical", out_form="2", comp=2, domain=self.domain ) fun_electrons_pb_1 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=0, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=0, domain=self.domain ) fun_electrons_pb_2 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=1, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=1, domain=self.domain ) fun_electrons_pb_3 = TransformedPformComponent( - forcetermelectrons_class, - given_in_basis="physical", - out_form="2", - comp=2, - domain=self.domain, + forcetermelectrons_class, fun_basis="physical", out_form="2", comp=2, domain=self.domain ) if self._lifting: l2_proj = L2Projector(space_id="Hdiv", mass_ops=self._mass_opsv0) @@ -8272,10 +7845,7 @@ def allocate(self): self._S21 = None if self.derhamv0.with_local_projectors: self._S21 = BasisProjectionOperatorLocal( - self.derhamv0._Ploc["1"], - self.derhamv0.Vh_fem["2"], - fun, - transposed=False, + self.derhamv0._Ploc["1"], self.derhamv0.Vh_fem["2"], fun, transposed=False ) if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): @@ -8385,10 +7955,7 @@ def allocate(self): self._S21 = None if self.derham.with_local_projectors: self._S21 = BasisProjectionOperatorLocal( - self.derham._Ploc["1"], - self.derham.Vh_fem["2"], - fun, - transposed=False, + self.derham._Ploc["1"], self.derham.Vh_fem["2"], fun, transposed=False ) if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): @@ -8434,9 +8001,9 @@ def allocate(self): A11np = self._M2np + self._A11np_notimedependency if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - A11np += self._stab_sigma * xp.identity(A11np.shape[0]) + A11np += self._stab_sigma * np.identity(A11np.shape[0]) self.A22np = ( - self._stab_sigma * xp.identity(A11np.shape[0]) + self._stab_sigma * np.identity(A11np.shape[0]) + self._nu_e * ( self._Dnp.T @ self._M3np @ self._Dnp @@ -8445,7 +8012,7 @@ def allocate(self): + self._M2Bnp / self._eps_norm ) self._A22prenp = ( - xp.identity(self.A22np.shape[0]) * self._stab_sigma + np.identity(self.A22np.shape[0]) * self._stab_sigma ) # + self._nu_e * (self._Dnp.T @ self._M3np @ self._Dnp) elif self._method_to_solve in ("SparseSolver", "ScipySparse"): A11np += self._stab_sigma * sc.sparse.eye(A11np.shape[0], format="csr") @@ -8478,10 +8045,10 @@ def allocate(self): A=_A, B=_B, F=_F, - solver_name=self.options.solver, - tol=self.options.solver_params.tol, - max_iter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + 
solver_name=solver["type"][0], + tol=solver["tol"], + max_iter=solver["maxiter"], + verbose=solver["verbose"], pc=None, ) # Allocate memory for call @@ -8495,17 +8062,17 @@ def allocate(self): F=_Fnp, method_to_solve=self._method_to_solve, preconditioner=self._preconditioner, - spectralanalysis=self.options.spectralanalysis, - tol=self.options.solver_params.tol, - max_iter=self.options.solver_params.maxiter, - verbose=self.options.solver_params.verbose, + spectralanalysis=spectralanalysis, + tol=solver["tol"], + max_iter=solver["maxiter"], + verbose=solver["verbose"], ) def __call__(self, dt): # current variables - unfeec = self.variables.u.spline.vector - uenfeec = self.variables.ue.spline.vector - phinfeec = self.variables.phi.spline.vector + unfeec = self.feec_vars[0] + uenfeec = self.feec_vars[1] + phinfeec = self.feec_vars[2] if self._variant == "GMRES": if self._lifting: @@ -8622,13 +8189,13 @@ def __call__(self, dt): uen = _sol1[1] phin = _sol2 # write new coeffs into self.feec_vars - max_du, max_due, max_dphi = self.update_feec_variables(u=un, ue=uen, phi=phin) + max_du, max_due, max_dphi = self.feec_vars_update(un, uen, phin) elif self._variant == "Uzawa": # Numpy A11np = self._M2np / dt + self._A11np_notimedependency if self._method_to_solve in ("DirectNPInverse", "InexactNPInverse"): - A11np += self._stab_sigma * xp.identity(A11np.shape[0]) + A11np += self._stab_sigma * np.identity(A11np.shape[0]) _A22prenp = self._A22prenp A22np = self.A22np elif self._method_to_solve in ("SparseSolver", "ScipySparse"): @@ -8638,7 +8205,7 @@ def __call__(self, dt): # _Anp[1] and _Anppre[1] remain unchanged _Anp = [A11np, A22np] if self._preconditioner: _A11prenp = self._M2np / dt # + self._A11prenp_notimedependency _Anppre = [_A11prenp, _A22prenp] @@ -8675,24 +8242,20 @@ def __call__(self, dt): _Fnp = [_F1np, _F2np] if self.rank == 0: if self._preconditioner: self._solver_UzawaNumpy.Apre = _Anppre self._solver_UzawaNumpy.A = _Anp self._solver_UzawaNumpy.F = _Fnp if self._lifting: un, uen, phin, info, residual_norms, spectralresult = self._solver_UzawaNumpy( - u0.vector, - ue0.vector, - phinfeec, + u0.vector, ue0.vector, phinfeec ) un += u_prime.vector.toarray() uen += ue_prime.vector.toarray() else: un, uen, phin, info, residual_norms, spectralresult = self._solver_UzawaNumpy( - unfeec, - uenfeec, - phinfeec, + unfeec, uenfeec, phinfeec ) dimlist = [[shp - 2 * pi for shp, pi in zip(unfeec[i][:].shape, self.derham.p)] for i in range(3)] @@ -8722,10 +8285,10 @@ def __call__(self, dt): e = phi_temp.ends phi_temp[s[0] : e[0] + 1, s[1] : e[1] + 1, s[2] : e[2] + 1] = phin.reshape(*dimphi) else: - print("TwoFluidQuasiNeutralFull is only running on one MPI.") + print("TwoFluidQuasiNeutralFull is only running on one MPI process.") # write new coeffs into self.feec_vars - max_du, max_due, max_dphi = self.update_feec_variables(u=u_temp, ue=ue_temp, phi=phi_temp) + max_du, max_due, max_dphi = self.feec_vars_update(u_temp, ue_temp, phi_temp) if self._info and self._rank == 0: print("Status for TwoFluidQuasiNeutralFull:", info["success"]) diff --git a/src/struphy/propagators/propagators_markers.py b/src/struphy/propagators/propagators_markers.py index 0360f39de..fff58d514 100644 --- a/src/struphy/propagators/propagators_markers.py +++ b/src/struphy/propagators/propagators_markers.py @@ -1,37 +1,22 @@ "Only particle variables are updated."
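Both propagator modules rely on the same `options` convention: `options(default=False)` returns a dict whose list-valued entries enumerate the admissible choices, and `descend_options_dict` (imported from `struphy.io.setup`) collapses that dict into concrete defaults before it is consumed in `__init__` signatures, e.g. `algo: str = options(default=True)["algo"]`. The helper below is a hypothetical re-implementation for illustration only; the real function takes an accumulator argument (`descend_options_dict(dct, [])`) and may differ in detail, but this matches the behavior the call sites depend on (first list entry wins, nested dicts are descended):

def descend_options_sketch(dct):
    # illustrative stand-in for struphy.io.setup.descend_options_dict
    out = {}
    for key, val in dct.items():
        if isinstance(val, dict):
            out[key] = descend_options_sketch(val)  # recurse into sub-dicts
        elif isinstance(val, list):
            out[key] = val[0]  # first admissible value is the default
        else:
            out[key] = val  # scalars pass through unchanged
    return out

opts = {
    "algo": ["rk4", "forward_euler", "heun2", "rk2", "heun3"],
    "lin_solver": {
        "type": [("pcg", "MassMatrixDiagonalPreconditioner"), ("cg", None)],
        "tol": 1e-12,
    },
}
defaults = descend_options_sketch(opts)
assert defaults["algo"] == "rk4"
assert defaults["lin_solver"]["type"] == ("pcg", "MassMatrixDiagonalPreconditioner")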
-import copy -from dataclasses import dataclass -from typing import Callable, Literal, get_args - -import cunumpy as xp -from line_profiler import profile from numpy import array, polynomial, random -from psydac.ddm.mpi import mpi as MPI -from psydac.linalg.basic import LinearOperator from psydac.linalg.block import BlockVector from psydac.linalg.stencil import StencilVector from struphy.feec.mass import WeightedMassOperators from struphy.fields_background.base import MHDequilibrium from struphy.fields_background.equils import set_defaults -from struphy.io.options import ( - OptsKernel, - OptsMPIsort, - OptsVecSpace, - check_option, -) from struphy.io.setup import descend_options_dict -from struphy.models.variables import FEECVariable, PICVariable, SPHVariable from struphy.ode.utils import ButcherTableau from struphy.pic.accumulation import accum_kernels, accum_kernels_gc -from struphy.pic.accumulation.particles_to_grid import AccumulatorVector from struphy.pic.base import Particles from struphy.pic.particles import Particles3D, Particles5D, Particles6D, ParticlesSPH from struphy.pic.pushing import eval_kernels_gc, pusher_kernels, pusher_kernels_gc from struphy.pic.pushing.pusher import Pusher from struphy.polar.basic import PolarVector from struphy.propagators.base import Propagator +from struphy.utils.arrays import xp as np from struphy.utils.pyccel import Pyccelkernel @@ -51,60 +36,47 @@ class PushEta(Propagator): Available algorithms: * Explicit from :class:`~struphy.ode.utils.ButcherTableau` + + Parameters + ---------- + particles : Particles6D | ParticlesSPH + Particles object. + + algo : str + Algorithm for solving the ODE (see options below). + + density_field: StencilVector + Storage for density evaluation at each __call__. """ - class Variables: - def __init__(self): - self._var: PICVariable | SPHVariable = None - - @property - def var(self) -> PICVariable | SPHVariable: - return self._var - - @var.setter - def var(self, new): - assert isinstance(new, PICVariable | SPHVariable) - self._var = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - butcher: ButcherTableau = None - - def __post_init__(self): - # defaults - if self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["rk4", "forward_euler", "heun2", "rk2", "heun3"] + if default: + dct = descend_options_dict(dct, []) + return dct + + def __init__( + self, + particles: Particles6D | ParticlesSPH, + *, + algo: str = options(default=True)["algo"], + density_field: StencilVector | None = None, + ): + # base class constructor call + super().__init__(particles) + # get kernel kernel = Pyccelkernel(pusher_kernels.push_eta_stage) # define algorithm - butcher = self.options.butcher + butcher = ButcherTableau(algo) # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp + from struphy.utils.arrays import xp as np - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher.a) + [0.0]) + butcher._a = np.diag(butcher.a, k=-1) + butcher._a = 
np.array(list(butcher.a) + [0.0]) args_kernel = ( butcher.a, @@ -113,7 +85,7 @@ def allocate(self): ) self._pusher = Pusher( - self.variables.var.particles, + particles, kernel, args_kernel, self.domain.args_domain, @@ -122,13 +94,28 @@ def allocate(self): mpi_sort="each", ) - @profile + self._eval_density = False + if density_field is not None: + self._eval_density = True + self._density_field = density_field + def __call__(self, dt): self._pusher(dt) # update_weights - if self.variables.var.particles.control_variate: - self.variables.var.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() + + if self._eval_density: + eval_density = lambda eta1, eta2, eta3: self.particles[0].eval_density( + eta1, + eta2, + eta3, + h1=0.1, + h2=0.1, + h3=0.1, + ) + self.derham.P["3"](eval_density, out=self._density_field) class PushVxB(Propagator): @@ -136,9 +123,9 @@ class PushVxB(Propagator): .. math:: - \frac{\textnormal d \mathbf v_p(t)}{\textnormal d t} = \frac{1}{\varepsilon} \, \mathbf v_p(t) \times (\mathbf B + \mathbf B_{\text{add}}) \,, + \frac{\textnormal d \mathbf v_p(t)}{\textnormal d t} = \kappa \, \mathbf v_p(t) \times (\mathbf B + \mathbf B_{\text{add}}) \,, - where :math:`\varepsilon = 1/(\hat\Omega_c \hat t)` is a constant scaling factor, and for rotation vector :math:`\mathbf B` and optional, additional fixed rotation + where :math:`\kappa \in \mathbb R` is a constant scaling factor, and for rotation vector :math:`\mathbf B` and optional, additional fixed rotation vector :math:`\mathbf B_{\text{add}}`, both given as a 2-form: .. math:: @@ -148,80 +135,45 @@ class PushVxB(Propagator): Available algorithms: ``analytic``, ``implicit``. """ - class Variables: - def __init__(self): - self._ions: PICVariable | SPHVariable = None - - @property - def ions(self) -> PICVariable | SPHVariable: - return self._ions - - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable | SPHVariable) - assert new.space in ("Particles6D", "DeltaFParticles6D", "ParticlesSPH") - self._ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal["analytic", "implicit"] - # propagator options - algo: OptsAlgo = "analytic" - b2_var: FEECVariable = None - - def __post_init__(self): - # checks - check_option(self.algo, self.OptsAlgo) - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # scaling factor - self._epsilon = self.variables.ions.species.equation_params.epsilon - assert self.derham is not None, f"{self.__class__.__name__} needs a Derham object." 
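For orientation on the ``analytic`` option: with :math:`\mathbf B` frozen over a step, :math:`\dot{\mathbf v} = \kappa\, \mathbf v \times \mathbf B` is solved exactly by rotating :math:`\mathbf v` about the axis :math:`-\kappa \mathbf B` by the angle :math:`|\kappa|\,|\mathbf B|\,\Delta t`, which conserves :math:`|\mathbf v|`. The single-particle NumPy sketch below illustrates this Rodrigues rotation; the actual `push_vxb_analytic` is a pyccel kernel acting on marker arrays and spline-evaluated fields, so the names and call structure here are illustrative only:

import numpy as np

def push_vxb_exact(v, B, kappa, dt):
    """Exact step of dv/dt = kappa * v x B for B held constant during the step."""
    omega = -kappa * B  # rewrite as dv/dt = omega x v
    theta = np.linalg.norm(omega) * dt
    if theta == 0.0:
        return v.copy()
    axis = omega / np.linalg.norm(omega)
    # Rodrigues rotation of v about `axis` by angle `theta`
    return (v * np.cos(theta)
            + np.cross(axis, v) * np.sin(theta)
            + axis * (axis @ v) * (1.0 - np.cos(theta)))

# checks: speed is conserved, and the small-dt slope matches kappa * v x B
v = np.array([1.0, 0.0, 0.0])
B = np.array([0.0, 0.0, 2.0])
kappa, dt = 0.5, 1.0e-6
v1 = push_vxb_exact(v, B, kappa, dt)
assert abs(np.linalg.norm(v1) - np.linalg.norm(v)) < 1e-12
assert np.allclose((v1 - v) / dt, kappa * np.cross(v, B), atol=1e-5)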
+ @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["analytic", "implicit"] + if default: + dct = descend_options_dict(dct, []) + return dct + def __init__( + self, + particles: Particles6D, + *, + algo: str = options(default=True)["algo"], + kappa: float = 1.0, + b2: BlockVector | PolarVector, + b2_add: BlockVector | PolarVector = None, + ): # TODO: treat PolarVector as well, but polar splines are being reworked at the moment - if self.projected_equil is not None: - self._b2 = self.projected_equil.b2 - assert self._b2.space == self.derham.Vh["2"] - else: - self._b2 = self.derham.Vh["2"].zeros() + assert b2.space == self.derham.Vh["2"] + if b2_add is not None: + assert b2_add.space == self.derham.Vh["2"] - if self.options.b2_var is None: - self._b2_var = None - else: - assert self.options.b2_var.spline.vector.space == self.derham.Vh["2"] - self._b2_var = self.options.b2_var.spline.vector + # base class constructor call + super().__init__(particles) - # allocate dummy vectors to avoid temporary array allocations + # parameters that need to be exposed + self._kappa = kappa + self._b2 = b2 + self._b2_add = b2_add self._tmp = self.derham.Vh["2"].zeros() self._b_full = self.derham.Vh["2"].zeros() # define pusher kernel - if self.options.algo == "analytic": + if algo == "analytic": kernel = Pyccelkernel(pusher_kernels.push_vxb_analytic) - elif self.options.algo == "implicit": + elif algo == "implicit": kernel = Pyccelkernel(pusher_kernels.push_vxb_implicit) else: - raise ValueError(f"{self.options.algo =} not supported.") + raise ValueError(f"{algo = } not supported.") # instantiate Pusher args_kernel = ( @@ -232,7 +184,7 @@ def allocate(self): ) self._pusher = Pusher( - self.variables.ions.particles, + particles, kernel, args_kernel, self.domain.args_domain, @@ -240,26 +192,24 @@ def allocate(self): ) # transposed extraction operator PolarVector --> BlockVector (identity map in case of no polar splines) - self._E2T: LinearOperator = self.derham.extraction_ops["2"].transpose() + self._E2T = self.derham.extraction_ops["2"].transpose() - @profile def __call__(self, dt): # sum up total magnetic field tmp = self._b2.copy(out=self._tmp) - if self._b2_var is not None: - tmp += self._b2_var + if self._b2_add is not None: + tmp += self._b2_add # extract coefficients to tensor product space - b_full: BlockVector = self._E2T.dot(tmp, out=self._b_full) + b_full = self._E2T.dot(tmp, out=self._b_full) b_full.update_ghost_regions() - b_full /= self._epsilon # call pusher kernel - self._pusher(dt) + self._pusher(self._kappa * dt) # update_weights - if self.variables.ions.particles.control_variate: - self.variables.ions.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() class PushVinEfield(Propagator): @@ -267,107 +217,57 @@ class PushVinEfield(Propagator): .. math:: - \frac{\text{d} \mathbf{v}_p}{\text{d} t} = \frac{1}{\varepsilon} \, \mathbf{E}(\mathbf{x}_p) \,, + \frac{\text{d} \mathbf{v}_p}{\text{d} t} = \kappa \, \mathbf{E}(\mathbf{x}_p) \,, - where :math:`\varepsilon \in \mathbb R` is a constant. In logical coordinates, given by :math:`\mathbf x = F(\boldsymbol \eta)`: + where :math:`\kappa \in \mathbb R` is a constant and in logical coordinates, given by :math:`\mathbf x = F(\boldsymbol \eta)`: .. 
math:: - \frac{\text{d} \mathbf{v}_p}{\text{d} t} = \frac{1}{\varepsilon} \, DF^{-\top} \hat{\mathbf E}^1(\boldsymbol \eta_p) \,, + \frac{\text{d} \mathbf{v}_p}{\text{d} t} = \kappa \, DF^{-\top} \hat{\mathbf E}^1(\boldsymbol \eta_p) \,, - which is solved analytically. :math:`\mathbf E` can optionally be defined - through a potential, :math:`\mathbf E = - \nabla \phi`. + which is solved analytically. """ - class Variables: - def __init__(self): - self._var: PICVariable | SPHVariable = None - - @property - def var(self) -> PICVariable | SPHVariable: - return self._var - - @var.setter - def var(self, new): - assert isinstance(new, PICVariable | SPHVariable) - assert new.space in ("Particles6D", "DeltaFParticles6D", "ParticlesSPH") - self._var = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # propagator options - e_field: FEECVariable | tuple[Callable] = None - phi: FEECVariable | Callable = None - - def __post_init__(self): - # checks - if self.e_field is not None: - assert isinstance(self.e_field, tuple[Callable]) or self.e_field.space == "Hcurl" - else: - if self.phi is not None: - assert isinstance(self.phi, Callable) or self.phi.space == "H1" - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # scaling factor - self._epsilon = self.variables.var.species.equation_params.epsilon - - self._e_field = None - - if self.options.e_field is not None: - if isinstance(self.options.e_field, tuple[Callable]): - self._e_field = self.derham.P["1"](self.options.e_field) - else: - self._e_field = self.options.e_field.spline.vector + @staticmethod + def options(): + pass - if self.options.phi is not None: - if isinstance(self.options.phi, Callable): - _phi = self.derham.P["0"](self.options.phi) - else: - _phi = self.options.phi.spline.vector - self._e_field = self.derham.grad.dot(_phi) - self._e_field.update_ghost_regions() # very important, we will move it inside grad - self._e_field *= -1.0 + def __init__( + self, + particles: Particles6D, + *, + e_field: BlockVector | PolarVector, + kappa: float = 1.0, + ): + super().__init__(particles) - if self._e_field is not None: - # instantiate Pusher - args_kernel = ( - self.derham.args_derham, - self._e_field[0]._data, - self._e_field[1]._data, - self._e_field[2]._data, - 1.0 / self._epsilon, - ) + self.kappa = kappa - self._pusher = Pusher( - self.variables.var.particles, - Pyccelkernel(pusher_kernels.push_v_with_efield), - args_kernel, - self.domain.args_domain, - alpha_in_kernel=1.0, - ) + assert isinstance(e_field, (BlockVector, PolarVector)) + self._e_field = e_field + + # instantiate Pusher + args_kernel = ( + self.derham.args_derham, + self._e_field[0]._data, + self._e_field[1]._data, + self._e_field[2]._data, + self.kappa, + ) + + self._pusher = Pusher( + particles, + Pyccelkernel(pusher_kernels.push_v_with_efield), + args_kernel, + self.domain.args_domain, + alpha_in_kernel=1.0, + ) def __call__(self, dt): - if self._e_field is not None: - self._pusher(dt) + """ + TODO + """ + self._pusher(dt) class PushEtaPC(Propagator): @@ -398,106 +298,85 @@ class PushEtaPC(Propagator): * ``heun3`` (3rd order) """ - class 
Variables: - def __init__(self): - self._var: PICVariable | SPHVariable = None - - @property - def var(self) -> PICVariable | SPHVariable: - return self._var - - @var.setter - def var(self, new): - assert isinstance(new, PICVariable | SPHVariable) - self._var = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - butcher: ButcherTableau = None - use_perp_model: bool = True - u_tilde: FEECVariable = None - u_space: OptsVecSpace = "Hdiv" - - def __post_init__(self): - # checks - check_option(self.u_space, OptsVecSpace) - assert isinstance(self.u_tilde, FEECVariable) - - # defaults - if self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._u_tilde = self.options.u_tilde.spline.vector - - # get kernell: - if self.options.u_space == "Hcurl": - kernel = Pyccelkernel(pusher_kernels.push_pc_eta_stage_Hcurl) - elif self.options.u_space == "Hdiv": - kernel = Pyccelkernel(pusher_kernels.push_pc_eta_stage_Hdiv) - elif self.options.u_space == "H1vec": - kernel = Pyccelkernel(pusher_kernels.push_pc_eta_stage_H1vec) - else: - raise ValueError( - f'{self.options.u_space =} not valid, choose from "Hcurl", "Hdiv" or "H1vec.', - ) + @staticmethod + def options(default=False): + dct = {} + dct["use_perp_model"] = [True, False] - # define algorithm - butcher = self.options.butcher - # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp + if default: + dct = descend_options_dict(dct, []) - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher.a) + [0.0]) + return dct + + def __init__( + self, + particles: Particles, + *, + u: BlockVector | PolarVector, + use_perp_model: bool = options(default=True)["use_perp_model"], + u_space: str, + ): + super().__init__(particles) + + assert isinstance(u, (BlockVector, PolarVector)) + + self._u = u + + # call Pusher class + if use_perp_model: + if u_space == "Hcurl": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_Hcurl) + elif u_space == "Hdiv": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_Hdiv) + elif u_space == "H1vec": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_H1vec) + else: + raise ValueError( + f'{u_space = } not valid, choose from "Hcurl", "Hdiv" or "H1vec.', + ) + else: + if u_space == "Hcurl": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_Hcurl_full) + elif u_space == "Hdiv": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_Hdiv_full) + elif u_space == "H1vec": + kernel = Pyccelkernel(pusher_kernels.push_pc_eta_rk4_H1vec_full) + else: + raise ValueError( + f'{u_space = } not valid, choose from "Hcurl", "Hdiv" or "H1vec.', + ) args_kernel = ( self.derham.args_derham, - self._u_tilde[0]._data, - self._u_tilde[1]._data, - self._u_tilde[2]._data, - self.options.use_perp_model, - butcher.a, - butcher.b, - butcher.c, + self._u[0]._data, + self._u[1]._data, + self._u[2]._data, ) self._pusher = Pusher( - self.variables.var.particles, + particles, kernel, args_kernel, self.domain.args_domain, alpha_in_kernel=1.0, - n_stages=butcher.n_stages, + n_stages=4, mpi_sort="each", ) def __call__(self, dt): - 
self._u_tilde.update_ghost_regions() + # check if ghost regions are synchronized + if not self._u[0].ghost_regions_in_sync: + self._u[0].update_ghost_regions() + if not self._u[1].ghost_regions_in_sync: + self._u[1].update_ghost_regions() + if not self._u[2].ghost_regions_in_sync: + self._u[2].update_ghost_regions() self._pusher(dt) # update_weights - if self.variables.var.particles.control_variate: - self.variables.var.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() class PushGuidingCenterBxEstar(Propagator): @@ -530,74 +409,41 @@ class PushGuidingCenterBxEstar(Propagator): * :func:`~struphy.pic.pushing.pusher_kernels_gc.push_gc_bxEstar_discrete_gradient_2nd_order` """ - class Variables: - def __init__(self): - self._ions: PICVariable = None - - @property - def ions(self) -> PICVariable: - return self._ions - - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles5D" - self._ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal[ - "discrete_gradient_2nd_order", - "discrete_gradient_1st_order", - "discrete_gradient_1st_order_newton", - "explicit", - ] - # propagator options - phi: FEECVariable = None - evaluate_e_field: bool = False - b_tilde: FEECVariable = None - algo: OptsAlgo = "discrete_gradient_1st_order" - butcher: ButcherTableau = None - maxiter: int = 20 - tol: float = 1e-7 - mpi_sort: OptsMPIsort = "each" - verbose: bool = False - - def __post_init__(self): - # checks - check_option(self.algo, self.OptsAlgo) - check_option(self.mpi_sort, OptsMPIsort) - - # defaults - if self.phi is None: - self.phi = FEECVariable(space="H1") - - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # scaling factor - self._epsilon = self.variables.ions.species.equation_params.epsilon + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = { + "method": [ + "discrete_gradient_2nd_order", + "discrete_gradient_1st_order", + "discrete_gradient_1st_order_newton", + "rk4", + "forward_euler", + "heun2", + "rk2", + "heun3", + ], + "maxiter": 20, + "tol": 1e-7, + "mpi_sort": "each", + "verbose": False, + } + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles5D, + *, + phi: StencilVector = None, + evaluate_e_field: bool = False, + b_tilde: BlockVector = None, + epsilon: float = 1.0, + algo: dict = options(default=True)["algo"], + ): + super().__init__(particles) # magnetic equilibrium field unit_b1 = self.projected_equil.unit_b1 @@ -606,13 +452,14 @@ def allocate(self): curl_unit_b_dot_b0 = self.projected_equil.curl_unit_b_dot_b0 # magnetic perturbation - if self.options.b_tilde is not None: + self._b_tilde = b_tilde + if self._b_tilde is not None: self._B_dot_b = self.derham.Vh["0"].zeros() self._grad_b_full = self.derham.Vh["1"].zeros() self._PB = getattr(self.basis_ops, "PB") - B_dot_b = self._PB.dot(self.options.b_tilde.spline.vector, out=self._B_dot_b) + 
B_dot_b = self._PB.dot(self._b_tilde, out=self._B_dot_b) B_dot_b.update_ghost_regions() grad_b_full = self.derham.grad.dot(B_dot_b, out=self._grad_b_full) @@ -625,19 +472,20 @@ def allocate(self): self._B_dot_b = self._absB0 # allocate electric field - self.options.phi.allocate(self.derham, self.domain) - self._phi = self.options.phi.spline.vector - self._evaluate_e_field = self.options.evaluate_e_field + if phi is None: + phi = self.derham.Vh["0"].zeros() + evaluate_e_field = False + self._phi = phi + self._evaluate_e_field = evaluate_e_field self._e_field = self.derham.Vh["1"].zeros() + self._epsilon = epsilon # choose method - particles = self.variables.ions.particles - - if "discrete_gradient" in self.options.algo: + if "discrete_gradient" in algo["method"]: # place for storing data during iteration first_free_idx = particles.args_markers.first_free_idx - if "1st_order" in self.options.algo: + if "1st_order" in algo["method"]: # init kernels self.add_init_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -676,7 +524,7 @@ def allocate(self): ), ) - if "newton" in self.options.algo: + if "newton" in algo["method"]: # eval kernels self.add_eval_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -792,7 +640,7 @@ def allocate(self): self._evaluate_e_field, ) - elif "2nd_order" in self.options.algo: + elif "2nd_order" in algo["method"]: # init kernels (evaluate at eta^n and save) self.add_init_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -843,6 +691,11 @@ def allocate(self): self._evaluate_e_field, ) + else: + raise NotImplementedError( + f"Chosen method {algo['method']} is not implemented.", + ) + # Pusher instance self._pusher = Pusher( particles, @@ -852,22 +705,19 @@ def allocate(self): alpha_in_kernel=alpha_in_kernel, init_kernels=self.init_kernels, eval_kernels=self.eval_kernels, - maxiter=self.options.maxiter, - tol=self.options.tol, - mpi_sort=self.options.mpi_sort, - verbose=self.options.verbose, + maxiter=algo["maxiter"], + tol=algo["tol"], + mpi_sort=algo["mpi_sort"], + verbose=algo["verbose"], ) else: - if self.options.butcher is None: - butcher = ButcherTableau() - else: - butcher = self.options.butcher + butcher = ButcherTableau(algo["method"]) # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp + from struphy.utils.arrays import xp as np - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher.a) + [0.0]) + butcher._a = np.diag(butcher.a, k=-1) + butcher._a = np.array(list(butcher.a) + [0.0]) kernel = Pyccelkernel(pusher_kernels_gc.push_gc_bxEstar_explicit_multistage) @@ -898,11 +748,10 @@ def allocate(self): self.domain.args_domain, alpha_in_kernel=1.0, n_stages=butcher.n_stages, - mpi_sort=self.options.mpi_sort, - verbose=self.options.verbose, + mpi_sort=algo["mpi_sort"], + verbose=algo["verbose"], ) - @profile def __call__(self, dt): # electric field # TODO: add out to __neg__ of StencilVector @@ -911,8 +760,8 @@ def __call__(self, dt): e_field.update_ghost_regions() # magnetic perturbation - if self.options.b_tilde is not None: - B_dot_b = self._PB.dot(self.options.b_tilde.spline.vector, out=self._B_dot_b) + if self._b_tilde is not None: + B_dot_b = self._PB.dot(self._b_tilde, out=self._B_dot_b) B_dot_b.update_ghost_regions() grad_b_full = self.derham.grad.dot(B_dot_b, out=self._grad_b_full) @@ -925,8 +774,8 @@ def __call__(self, dt): self._pusher(dt) # update_weights - if self.variables.ions.species.weights_params.control_variate: - self.variables.ions.particles.update_weights() + if
self.particles[0].control_variate: + self.particles[0].update_weights() class PushGuidingCenterParallel(Propagator): @@ -971,74 +820,43 @@ class PushGuidingCenterParallel(Propagator): * :func:`~struphy.pic.pushing.pusher_kernels_gc.push_gc_Bstar_discrete_gradient_2nd_order` """ - class Variables: - def __init__(self): - self._ions: PICVariable = None - - @property - def ions(self) -> PICVariable: - return self._ions - - @ions.setter - def ions(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles5D" - self._ions = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal[ - "discrete_gradient_2nd_order", - "discrete_gradient_1st_order", - "discrete_gradient_1st_order_newton", - "explicit", - ] - # propagator options - phi: FEECVariable = None - evaluate_e_field: bool = False - b_tilde: FEECVariable = None - algo: OptsAlgo = "discrete_gradient_1st_order" - butcher: ButcherTableau = None - maxiter: int = 20 - tol: float = 1e-7 - mpi_sort: OptsMPIsort = "each" - verbose: bool = False - - def __post_init__(self): - # checks - check_option(self.algo, self.OptsAlgo) - check_option(self.mpi_sort, OptsMPIsort) - - # defaults - if self.phi is None: - self.phi = FEECVariable(space="H1") - - if self.algo == "explicit" and self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # scaling factor - self._epsilon = self.variables.ions.species.equation_params.epsilon + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = { + "method": [ + "discrete_gradient_2nd_order", + "discrete_gradient_1st_order", + "discrete_gradient_1st_order_newton", + "rk4", + "forward_euler", + "heun2", + "rk2", + "heun3", + ], + "maxiter": 20, + "tol": 1e-7, + "mpi_sort": "each", + "verbose": False, + } + if default: + dct = descend_options_dict(dct, []) + + return dct + + def __init__( + self, + particles: Particles5D, + *, + phi: StencilVector = None, + evaluate_e_field: bool = False, + b_tilde: BlockVector = None, + epsilon: float = 1.0, + algo: dict = options(default=True)["algo"], + ): + super().__init__(particles) + + self._epsilon = epsilon # magnetic equilibrium field self._gradB1 = self.projected_equil.gradB1 @@ -1048,13 +866,14 @@ def allocate(self): curl_unit_b_dot_b0 = self.projected_equil.curl_unit_b_dot_b0 # magnetic perturbation - if self.options.b_tilde is not None: + self._b_tilde = b_tilde + if self._b_tilde is not None: self._B_dot_b = self.derham.Vh["0"].zeros() self._grad_b_full = self.derham.Vh["1"].zeros() self._PB = getattr(self.basis_ops, "PB") - B_dot_b = self._PB.dot(self.options.b_tilde.spline.vector, out=self._B_dot_b) + B_dot_b = self._PB.dot(self._b_tilde, out=self._B_dot_b) B_dot_b.update_ghost_regions() grad_b_full = self.derham.grad.dot(B_dot_b, out=self._grad_b_full) @@ -1067,19 +886,19 @@ def allocate(self): self._B_dot_b = self._absB0 # allocate electric field - self.options.phi.allocate(self.derham, domain=self.domain) - self._phi = self.options.phi.spline.vector - self._evaluate_e_field = self.options.evaluate_e_field + if phi 
is None: + phi = self.derham.Vh["0"].zeros() + self._phi = phi + self._evaluate_e_field = evaluate_e_field self._e_field = self.derham.Vh["1"].zeros() + self._epsilon = epsilon # choose method - particles = self.variables.ions.particles - - if "discrete_gradient" in self.options.algo: + if "discrete_gradient" in algo["method"]: # place for storing data during iteration first_free_idx = particles.args_markers.first_free_idx - if "1st_order" in self.options.algo: + if "1st_order" in algo["method"]: # init kernels self.add_init_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -1122,7 +941,7 @@ def allocate(self): ), ) - if "newton" in self.options.algo: + if "newton" in algo["method"]: # eval kernels self.add_eval_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -1237,7 +1056,7 @@ def allocate(self): self._evaluate_e_field, ) - elif "2nd_order" in self.options.algo: + elif "2nd_order" in algo["method"]: # init kernels (evaluate at eta^n and save) self.add_init_kernel( eval_kernels_gc.driftkinetic_hamiltonian, @@ -1291,6 +1110,11 @@ def allocate(self): self._evaluate_e_field, ) + else: + raise NotImplementedError( + f"Chosen method {algo['method']} is not implemented.", + ) + # Pusher instance self._pusher = Pusher( particles, @@ -1300,22 +1124,19 @@ def allocate(self): alpha_in_kernel=alpha_in_kernel, init_kernels=self.init_kernels, eval_kernels=self.eval_kernels, - maxiter=self.options.maxiter, - tol=self.options.tol, - mpi_sort=self.options.mpi_sort, - verbose=self.options.verbose, + maxiter=algo["maxiter"], + tol=algo["tol"], + mpi_sort=algo["mpi_sort"], + verbose=algo["verbose"], ) else: - if self.options.butcher is None: - butcher = ButcherTableau() - else: - butcher = self.options.butcher + butcher = ButcherTableau(algo["method"]) # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp + from struphy.utils.arrays import xp as np - butcher._a = xp.diag(butcher.a, k=-1) - butcher._a = xp.array(list(butcher.a) + [0.0]) + butcher._a = np.diag(butcher.a, k=-1) + butcher._a = np.array(list(butcher.a) + [0.0]) kernel = Pyccelkernel(pusher_kernels_gc.push_gc_Bstar_explicit_multistage) @@ -1349,11 +1170,10 @@ def allocate(self): self.domain.args_domain, alpha_in_kernel=1.0, n_stages=butcher.n_stages, - mpi_sort=self.options.mpi_sort, - verbose=self.options.verbose, + mpi_sort=algo["mpi_sort"], + verbose=algo["verbose"], ) - @profile def __call__(self, dt): # electric field # TODO: add out to __neg__ of StencilVector @@ -1362,8 +1182,8 @@ def __call__(self, dt): e_field.update_ghost_regions() # magnetic perturbation - if self.options.b_tilde is not None: - B_dot_b = self._PB.dot(self.options.b_tilde.spline.vector, out=self._B_dot_b) + if self._b_tilde is not None: + B_dot_b = self._PB.dot(self._b_tilde, out=self._B_dot_b) B_dot_b.update_ghost_regions() grad_b_full = self.derham.grad.dot(B_dot_b, out=self._grad_b_full) @@ -1376,8 +1196,8 @@ def __call__(self, dt): self._pusher(dt) # update_weights - if self.variables.ions.species.weights_params.control_variate: - self.variables.ions.particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() class PushDeterministicDiffusion(Propagator): @@ -1401,65 +1221,39 @@ class PushDeterministicDiffusion(Propagator): * Explicit from :class:`~struphy.ode.utils.ButcherTableau` """ - class Variables: - def __init__(self): - self._var: PICVariable = None - - @property - def var(self) -> PICVariable: - return self._var - - @var.setter - def var(self, new): - assert isinstance(new, PICVariable) - 
assert new.space == "Particles3D" - self._var = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - butcher: ButcherTableau = None - bc_type: tuple = ("periodic", "periodic", "periodic") - diff_coeff: float = 1.0 - - def __post_init__(self): - # defaults - if self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._bc_type = self.options.bc_type - self._diffusion = self.options.diff_coeff + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["rk4", "forward_euler", "heun2", "rk2", "heun3"] + dct["diffusion_coefficient"] = 1.0 + if default: + dct = descend_options_dict(dct, []) + return dct + + def __init__( + self, + particles: Particles3D, + *, + algo: str = options(default=True)["algo"], + bc_type: list = ["periodic", "periodic", "periodic"], + diffusion_coefficient: float = options()["diffusion_coefficient"], + ): + from struphy.pic.accumulation.particles_to_grid import AccumulatorVector + + super().__init__(particles) + + self._bc_type = bc_type + self._diffusion = diffusion_coefficient self._tmp = self.derham.Vh["1"].zeros() # choose algorithm - self._butcher = self.options.butcher + self._butcher = ButcherTableau(algo) # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp - - self._butcher._a = xp.diag(self._butcher.a, k=-1) - self._butcher._a = xp.array(list(self._butcher.a) + [0.0]) + from struphy.utils.arrays import xp as np - particles = self.variables.var.particles + self._butcher._a = np.diag(self._butcher.a, k=-1) + self._butcher._a = np.array(list(self._butcher.a) + [0.0]) self._u_on_grid = AccumulatorVector( particles, @@ -1496,10 +1290,9 @@ def __call__(self, dt): """ TODO """ - particles = self.variables.var.particles # accumulate - self._u_on_grid() + self._u_on_grid(self.particles[0].vdim) # take gradient pi_u = self._u_on_grid.vectors[0] @@ -1510,8 +1303,8 @@ def __call__(self, dt): self._pusher(dt) # update_weights - if particles.control_variate: - particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() class PushRandomDiffusion(Propagator): @@ -1534,64 +1327,36 @@ class PushRandomDiffusion(Propagator): * ``forward_euler`` (1st order) """ - class Variables: - def __init__(self): - self._var: PICVariable = None - - @property - def var(self) -> PICVariable: - return self._var - - @var.setter - def var(self, new): - assert isinstance(new, PICVariable) - assert new.space == "Particles3D" - self._var = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - butcher: ButcherTableau = None - bc_type: tuple = ("periodic", "periodic", "periodic") - diff_coeff: float = 1.0 - - def __post_init__(self): - # defaults - if self.butcher is None: - self.butcher = ButcherTableau() - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for 
propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - self._bc_type = self.options.bc_type - self._diffusion = self.options.diff_coeff - - particles = self.variables.var.particles - - self._noise = array(particles.markers[:, :3]) - - self._butcher = self.options.butcher + @staticmethod + def options(default=False): + dct = {} + dct["algo"] = ["forward_euler"] + dct["diffusion_coefficient"] = 1.0 + if default: + dct = descend_options_dict(dct, []) + return dct + + def __init__( + self, + particles: Particles3D, + algo: str = options(default=True)["algo"], + bc_type: list = ["periodic", "periodic", "periodic"], + diffusion_coefficient: float = options()["diffusion_coefficient"], + ): + super().__init__(particles) + + self._bc_type = bc_type + self._diffusion = diffusion_coefficient + + self._noise = array(self.particles[0].markers[:, :3]) + + # choose algorithm + self._butcher = ButcherTableau("forward_euler") # temp fix due to refactoring of ButcherTableau: - import cunumpy as xp + from struphy.utils.arrays import xp as np - self._butcher._a = xp.diag(self._butcher.a, k=-1) - self._butcher._a = xp.array(list(self._butcher.a) + [0.0]) + self._butcher._a = np.diag(self._butcher.a, k=-1) + self._butcher._a = np.array(list(self._butcher.a) + [0.0]) # instantiate Pusher args_kernel = ( @@ -1621,20 +1386,18 @@ def __call__(self, dt): TODO """ - particles = self.variables.var.particles - self._noise[:] = random.multivariate_normal( self._mean, self._cov, - len(particles.markers), + len(self.particles[0].markers), ) # push markers self._pusher(dt) # update_weights - if particles.control_variate: - particles.update_weights() + if self.particles[0].control_variate: + self.particles[0].update_weights() class PushVinSPHpressure(Propagator): @@ -1656,155 +1419,15 @@ class PushVinSPHpressure(Propagator): * Explicit from :class:`~struphy.ode.utils.ButcherTableau` """ - class Variables: - def __init__(self): - self._fluid: SPHVariable = None - - @property - def fluid(self) -> SPHVariable: - return self._fluid - - @fluid.setter - def fluid(self, new): - assert isinstance(new, SPHVariable) - assert new.space == "ParticlesSPH" - self._fluid = new - - def __init__(self): - self.variables = self.Variables() - - @dataclass - class Options: - # specific literals - OptsAlgo = Literal["forward_euler"] - OptsThermo = Literal["isothermal", "polytropic"] - # propagator options - kernel_type: OptsKernel = "gaussian_2d" - kernel_width: tuple = None - algo: OptsAlgo = "forward_euler" - gravity: tuple = (0.0, 0.0, 0.0) - thermodynamics: OptsThermo = "isothermal" - - def __post_init__(self): - # checks - check_option(self.kernel_type, OptsKernel) - check_option(self.algo, self.OptsAlgo) - check_option(self.thermodynamics, self.OptsThermo) - - @property - def options(self) -> Options: - if not hasattr(self, "_options"): - self._options = self.Options() - return self._options - - @options.setter - def options(self, new): - assert isinstance(new, self.Options) - if MPI.COMM_WORLD.Get_rank() == 0: - print(f"\nNew options for propagator '{self.__class__.__name__}':") - for k, v in new.__dict__.items(): - print(f" {k}: {v}") - self._options = new - - @profile - def allocate(self): - # init kernel for evaluating density etc. before each time step. 
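A note on the recurring "temp fix due to refactoring of ButcherTableau" lines in this patch: the explicit multistage pushers store only the subdiagonal of the Butcher matrix, and the second assignment overwrites the first, so the kernels presumably consume a flat per-stage vector with a trailing zero. A minimal standalone sketch of what the two expressions compute, assuming a 1D coefficient vector as input (a simplified stand-in, not the real struphy `ButcherTableau` class):

```python
import numpy as np  # stands in for struphy's "xp as np" alias

# classic RK4 has only subdiagonal Butcher entries a_{i+1,i} = (1/2, 1/2, 1)
a_sub = np.array([0.5, 0.5, 1.0])

# first assignment: expand to the full strictly lower-triangular matrix
a_matrix = np.diag(a_sub, k=-1)          # shape (4, 4)

# second assignment (which overwrites the first in the patch): flat
# per-stage vector with a trailing 0.0, so stage i can always read a[i]
a_flat = np.array(list(a_sub) + [0.0])   # [0.5, 0.5, 1.0, 0.0]
```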
- init_kernel = eval_kernels_gc.sph_pressure_coeffs - - particles = self.variables.fluid.particles - - first_free_idx = particles.args_markers.first_free_idx - comps = (0, 1, 2) - - boxes = particles.sorting_boxes.boxes - neighbours = particles.sorting_boxes.neighbours - holes = particles.holes - periodic = [bci == "periodic" for bci in particles.bc] - kernel_nr = particles.ker_dct()[self.options.kernel_type] - - if self.options.kernel_width is None: - self.options.kernel_width = tuple([1 / ni for ni in particles.boxes_per_dim]) - else: - assert all([hi <= 1 / ni for hi, ni in zip(self.options.kernel_width, particles.boxes_per_dim)]) - - # init kernel - args_init = ( - boxes, - neighbours, - holes, - *periodic, - kernel_nr, - *self.options.kernel_width, - ) - - self.add_init_kernel( - init_kernel, - first_free_idx, - comps, - args_init, - ) - - # pusher kernel - if self.options.thermodynamics == "isothermal": - kernel = Pyccelkernel(pusher_kernels.push_v_sph_pressure) - elif self.options.thermodynamics == "polytropic": - kernel = Pyccelkernel(pusher_kernels.push_v_sph_pressure_ideal_gas) - - gravity = xp.array(self.options.gravity, dtype=float) - - args_kernel = ( - boxes, - neighbours, - holes, - *periodic, - kernel_nr, - *self.options.kernel_width, - gravity, - ) - - # the Pusher class wraps around all kernels - self._pusher = Pusher( - particles, - kernel, - args_kernel, - self.domain.args_domain, - alpha_in_kernel=0.0, - init_kernels=self.init_kernels, - ) - - @profile - def __call__(self, dt): - self.variables.fluid.particles.put_particles_in_boxes() - self._pusher(dt) - - -class PushVinViscousPotential2D(Propagator): - r"""For each marker :math:`p`, solves - - .. math:: - - \frac{\textnormal d \mathbf v_p(t)}{\textnormal d t} = \kappa_p \sum_{i=1}^N w_i \left( \frac{1}{\rho^{N,h}(\boldsymbol \eta_p)} + \frac{1}{\rho^{N,h}(\boldsymbol \eta_i)} \right) DF^{-\top}\nabla W_h(\boldsymbol \eta_p - \boldsymbol \eta_i) \,, - - where :math:`DF^{-\top}` denotes the inverse transpose Jacobian, and with the smoothed density - - .. math:: - - \rho^{N,h}(\boldsymbol \eta) = \frac 1N \sum_{j=1}^N w_j \, W_h(\boldsymbol \eta - \boldsymbol \eta_j)\,, - - where :math:`W_h(\boldsymbol \eta)` is a smoothing kernel from :mod:`~struphy.pic.sph_smoothing_kernels`. - Time stepping: - - * Explicit from :class:`~struphy.ode.utils.ButcherTableau` - """ - @staticmethod def options(default=False): dct = {} dct["kernel_type"] = list(Particles.ker_dct()) - dct["kernel_width"] = None dct["algo"] = [ "forward_euler", ] # "heun2", "rk2", "heun3", "rk4"] + dct["gravity"] = (0.0, 0.0, 0.0) + dct["thermodynamics"] = ["isothermal", "polytropic"] if default: dct = descend_options_dict(dct, []) return dct @@ -1816,24 +1439,18 @@ def __init__( kernel_type: str = "gaussian_2d", kernel_width: tuple = None, algo: str = options(default=True)["algo"], # TODO: implement other algos than forward Euler + gravity: tuple = options(default=True)["gravity"], + thermodynamics: str = options(default=True)["thermodynamics"], ): # base class constructor call super().__init__(particles) # init kernel for evaluating density etc. before each time step. 
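The kernel-width logic visible in the removed `allocate` above encodes a constraint of box-based neighbor search: the smoothing length per dimension must not exceed one sorting-box width, because only adjacent boxes are visited when evaluating the kernel sums. A small sketch of the rule (a hypothetical helper mirroring the logic above, not struphy API):

```python
def resolve_kernel_width(kernel_width, boxes_per_dim):
    """Default the smoothing lengths to one box width, or validate user input.

    Box-based neighbor search only visits adjacent boxes, so h_i <= 1/n_i
    must hold in every dimension.
    """
    if kernel_width is None:
        return tuple(1.0 / ni for ni in boxes_per_dim)
    assert all(hi <= 1.0 / ni for hi, ni in zip(kernel_width, boxes_per_dim))
    return tuple(kernel_width)


print(resolve_kernel_width(None, (8, 8, 1)))             # (0.125, 0.125, 1.0)
print(resolve_kernel_width((0.1, 0.1, 0.5), (8, 8, 1)))  # passes the check
```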
- init_kernel_1 = Pyccelkernel(eval_kernels_gc.sph_mean_velocity_coeffs) + init_kernel = Pyccelkernel(eval_kernels_gc.sph_pressure_coeffs) + first_free_idx = particles.args_markers.first_free_idx comps = (0, 1, 2) - init_kernel_2 = Pyccelkernel(eval_kernels_gc.sph_mean_velocity) - # first_free_idx = particles.args_markers.first_free_idx - # comps = (0, 1, 2) - - init_kernel_3 = Pyccelkernel(eval_kernels_gc.sph_grad_mean_velocity) - comps_tensor = (0, 1, 2, 3, 4, 5, 6, 7, 8) - - init_kernel_4 = Pyccelkernel(eval_kernels_gc.sph_viscosity_tensor) - boxes = particles.sorting_boxes.boxes neighbours = particles.sorting_boxes.neighbours holes = particles.holes @@ -1856,34 +1473,19 @@ def __init__( ) self.add_init_kernel( - init_kernel_1, + init_kernel, first_free_idx, comps, args_init, ) - self.add_init_kernel( - init_kernel_2, - first_free_idx + 3, # +3 so that the previous one is not overwritten - comps, - args_init, - ) - - self.add_init_kernel( - init_kernel_3, - first_free_idx + 6, # +3 so that the previous one is not overwritten - comps_tensor, - args_init, - ) - - self.add_init_kernel( - init_kernel_4, - first_free_idx + 15, - comps_tensor, - args_init, - ) + # pusher kernel + if thermodynamics == "isothermal": + kernel = Pyccelkernel(pusher_kernels.push_v_sph_pressure) + elif thermodynamics == "polytropic": + kernel = Pyccelkernel(pusher_kernels.push_v_sph_pressure_ideal_gas) - kernel = Pyccelkernel(pusher_kernels.push_v_viscosity) + gravity = np.array(gravity, dtype=float) args_kernel = ( boxes, @@ -1892,6 +1494,7 @@ def __init__( *periodic, kernel_nr, *kernel_width, + gravity, ) # the Pusher class wraps around all kernels @@ -1909,7 +1512,7 @@ def __call__(self, dt): self._pusher(dt) -class PushVinViscousPotential3D(Propagator): +class PushVinViscousPotential(Propagator): r"""For each marker :math:`p`, solves .. math:: diff --git a/src/struphy/propagators/tests/test_gyrokinetic_poisson.py b/src/struphy/propagators/tests/test_gyrokinetic_poisson.py index 747ec65c7..6b0714f4b 100644 --- a/src/struphy/propagators/tests/test_gyrokinetic_poisson.py +++ b/src/struphy/propagators/tests/test_gyrokinetic_poisson.py @@ -1,4 +1,3 @@ -import cunumpy as xp import matplotlib.pyplot as plt import pytest from psydac.ddm.mpi import mpi as MPI @@ -7,11 +6,9 @@ from struphy.feec.projectors import L2Projector from struphy.feec.psydac_derham import Derham from struphy.geometry import domains -from struphy.geometry.base import Domain -from struphy.linear_algebra.solver import SolverParameters -from struphy.models.variables import FEECVariable +from struphy.propagators import ImplicitDiffusion from struphy.propagators.base import Propagator -from struphy.propagators.propagators_fields import ImplicitDiffusion +from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -27,19 +24,27 @@ ["Orthogonal", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 3.0}], ], ) -@pytest.mark.parametrize("projected_rhs", [False, True]) -def test_poisson_M1perp_1d(direction, bc_type, mapping, projected_rhs, show_plot=False): +def test_poisson_M1perp_1d(direction, bc_type, mapping, show_plot=False): """ Test the convergence of Poisson solver with M1perp diffusion matrix in 1D by means of manufactured solutions. 
""" + solver_params = { + "type": ("pcg", "MassMatrixPreconditioner"), + "tol": 1.0e-13, + "maxiter": 3000, + "info": True, + "verbose": False, + "recycle": False, + } + # create domain object dom_type = mapping[0] dom_params = mapping[1] domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) + domain = domain_class(**dom_params) if dom_type == "Cuboid": Lx = dom_params["r1"] - dom_params["l1"] @@ -56,9 +61,9 @@ def test_poisson_M1perp_1d(direction, bc_type, mapping, projected_rhs, show_plot errors = [] h_vec = [] if show_plot: - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) for n, Neli in enumerate(Nels): # boundary conditions (overwritten below) @@ -72,57 +77,54 @@ def test_poisson_M1perp_1d(direction, bc_type, mapping, projected_rhs, show_plot if direction == 0: Nel = [Neli, 1, 1] p = [pi, 1, 1] - e1 = xp.linspace(0.0, 1.0, 50) + e1 = np.linspace(0.0, 1.0, 50) if bc_type == "neumann": spl_kind = [False, True, True] def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) + return np.cos(np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) * (xp.pi / Lx) ** 2 + return np.cos(np.pi / Lx * x) * (np.pi / Lx) ** 2 else: if bc_type == "dirichlet": spl_kind = [False, True, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) + return np.sin(2 * np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) * (2 * xp.pi / Lx) ** 2 + return np.sin(2 * np.pi / Lx * x) * (2 * np.pi / Lx) ** 2 elif direction == 1: Nel = [1, Neli, 1] p = [1, pi, 1] - e2 = xp.linspace(0.0, 1.0, 50) + e2 = np.linspace(0.0, 1.0, 50) if bc_type == "neumann": spl_kind = [True, False, True] def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Ly * y) + return np.cos(np.pi / Ly * y) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Ly * y) * (xp.pi / Ly) ** 2 + return np.cos(np.pi / Ly * y) * (np.pi / Ly) ** 2 else: if bc_type == "dirichlet": spl_kind = [True, False, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Ly * y) + return np.sin(2 * np.pi / Ly * y) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Ly * y) * (2 * xp.pi / Ly) ** 2 + return np.sin(2 * np.pi / Ly * y) * (2 * np.pi / Ly) ** 2 else: print("Direction should be either 0 or 1") # create derham object - print(f"{dirichlet_bc =}") derham = Derham(Nel, p, spl_kind, dirichlet_bc=dirichlet_bc, comm=comm) # mass matrices @@ -133,57 +135,35 @@ def rho1_xyz(x, y, z): Propagator.mass_ops = mass_ops # pullbacks of right-hand side - def rho_pulled(e1, e2, e3): - return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=False) - - # define how to pass rho - if projected_rhs: - rho = FEECVariable(space="H1") - rho.allocate(derham=derham, domain=domain) - rho.spline.vector = 
derham.P["0"](rho_pulled) - else: - rho = rho_pulled - - # create Poisson solver - solver_params = SolverParameters( - tol=1.0e-13, - maxiter=3000, - info=True, - verbose=False, - recycle=False, - ) - - _phi = FEECVariable(space="H1") - _phi.allocate(derham=derham, domain=domain) + def rho1(e1, e2, e3): + return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=True) - poisson_solver = ImplicitDiffusion() - poisson_solver.variables.phi = _phi + rho_vec = L2Projector("H1", mass_ops).get_dofs(rho1, apply_bc=True) - poisson_solver.options = poisson_solver.Options( + # create Poisson solver + _phi = derham.create_spline_function("phi", "H1") + poisson_solver = ImplicitDiffusion( + _phi.vector, sigma_1=1e-12, sigma_2=0.0, sigma_3=1.0, divide_by_dt=True, diffusion_mat="M1perp", - rho=rho, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, + rho=rho_vec, + solver=solver_params, ) - poisson_solver.allocate() - # Solve Poisson (call propagator with dt=1.) dt = 1.0 poisson_solver(dt) # push numerical solution and compare - sol_val1 = domain.push(_phi.spline, e1, e2, e3, kind="0") + sol_val1 = domain.push(_phi, e1, e2, e3, kind="0") x, y, z = domain(e1, e2, e3) analytic_value1 = sol1_xyz(x, y, z) if show_plot: - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}") + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }") plt.subplot(2, 3, n + 1) if direction == 0: plt.plot(x[:, 0, 0], sol_val1[:, 0, 0], "ob", label="numerical") @@ -193,25 +173,24 @@ def rho_pulled(e1, e2, e3): plt.plot(y[0, :, 0], sol_val1[0, :, 0], "ob", label="numerical") plt.plot(y[0, :, 0], analytic_value1[0, :, 0], "r--", label="exact") plt.xlabel("y") - plt.title(f"{Nel =}") + plt.title(f"{Nel = }") plt.legend() - error = xp.max(xp.abs(analytic_value1 - sol_val1)) - print(f"{direction =}, {pi =}, {Neli =}, {error=}") + error = np.max(np.abs(analytic_value1 - sol_val1)) + print(f"{direction = }, {pi = }, {Neli = }, {error=}") errors.append(error) h = 1 / (Neli) h_vec.append(h) - m, _ = xp.polyfit(xp.log(Nels), xp.log(errors), deg=1) - print(f"For {pi =}, solution converges in {direction=} with rate {-m =} ") - assert -m > (pi + 1 - 0.07) + m, _ = np.polyfit(np.log(Nels), np.log(errors), deg=1) + print(f"For {pi = }, solution converges in {direction=} with rate {-m = } ") + assert -m > (pi + 1 - 0.06) # Plot convergence in 1D if show_plot: plt.figure( - f"Convergence for degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", - figsize=(12, 8), + f"Convergence for degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(12, 8) ) plt.plot(h_vec, errors, "o", label=f"p={p[direction]}") plt.plot( @@ -224,7 +203,7 @@ def rho_pulled(e1, e2, e3): plt.xscale("log") plt.xlabel("Grid Spacing h") plt.ylabel("Error") - plt.title("Poisson solver") + plt.title(f"Poisson solver") plt.legend() if show_plot and rank == 0: @@ -241,19 +220,26 @@ def rho_pulled(e1, e2, e3): ["Orthogonal", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 1.0}], ], ) -@pytest.mark.parametrize("projected_rhs", [False, True]) -def test_poisson_M1perp_2d(Nel, p, bc_type, mapping, projected_rhs, show_plot=False): +def test_poisson_M1perp_2d(Nel, p, bc_type, mapping, show_plot=False): """ Test the Poisson solver with M1perp diffusion matrix by means of manufactured solutions in 2D . 
""" + solver_params = { + "type": ("pcg", "MassMatrixPreconditioner"), + "tol": 1.0e-13, + "maxiter": 3000, + "info": True, + "verbose": False, + "recycle": False, + } # create domain object dom_type = mapping[0] dom_params = mapping[1] domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) + domain = domain_class(**dom_params) if dom_type == "Cuboid": Lx = dom_params["r1"] - dom_params["l1"] @@ -264,10 +250,10 @@ def test_poisson_M1perp_2d(Nel, p, bc_type, mapping, projected_rhs, show_plot=Fa # manufactured solution in 1D (overwritten for "neumann") def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) + return np.sin(2 * np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) * (2 * xp.pi / Lx) ** 2 + return np.sin(2 * np.pi / Lx * x) * (2 * np.pi / Lx) ** 2 # boundary conditions dirichlet_bc = None @@ -277,26 +263,25 @@ def rho1_xyz(x, y, z): # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.sin(2 * xp.pi * x / Lx + 4 * xp.pi / Ly * y) + return np.sin(2 * np.pi * x / Lx + 4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.sin(2 * xp.pi / Lx * x + 4 * xp.pi / Ly * y) * (2 * xp.pi / Lx) ** 2 - ddy = xp.sin(2 * xp.pi / Lx * x + 4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.sin(2 * np.pi / Lx * x + 4 * np.pi / Ly * y) * (2 * np.pi / Lx) ** 2 + ddy = np.sin(2 * np.pi / Lx * x + 4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy elif bc_type == "dirichlet": spl_kind = [False, True, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) - print(f"{dirichlet_bc =}") + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] + print(f"{dirichlet_bc = }") # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) + return np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (xp.pi / Lx) ** 2 - ddy = xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (np.pi / Lx) ** 2 + ddy = np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy elif bc_type == "neumann": @@ -304,19 +289,19 @@ def rho2_xyz(x, y, z): # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) + return np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (xp.pi / Lx) ** 2 - ddy = xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (np.pi / Lx) ** 2 + ddy = np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy # manufactured solution in 1D def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) + return np.cos(np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) * (xp.pi / Lx) ** 2 + return np.cos(np.pi / Lx * x) * (np.pi / Lx) ** 2 # create derham object derham = Derham(Nel, p, spl_kind, dirichlet_bc=dirichlet_bc, comm=comm) @@ -329,99 +314,53 @@ def rho1_xyz(x, y, z): Propagator.mass_ops = mass_ops # evaluation grid - e1 = xp.linspace(0.0, 1.0, 50) - e2 = xp.linspace(0.0, 1.0, 50) - e3 = xp.linspace(0.0, 1.0, 1) + e1 = np.linspace(0.0, 1.0, 50) + e2 = np.linspace(0.0, 1.0, 50) + e3 = np.linspace(0.0, 1.0, 1) # pullbacks of right-hand side - def rho1_pulled(e1, e2, e3): - 
return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=False) - - def rho2_pulled(e1, e2, e3): - return domain.pull(rho2_xyz, e1, e2, e3, kind="0", squeeze_out=False) + def rho1(e1, e2, e3): + return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=True) - # how to pass right-hand sides - if projected_rhs: - rho1 = FEECVariable(space="H1") - rho1.allocate(derham=derham, domain=domain) - rho1.spline.vector = derham.P["0"](rho1_pulled) + def rho2(e1, e2, e3): + return domain.pull(rho2_xyz, e1, e2, e3, kind="0", squeeze_out=True) - rho2 = FEECVariable(space="H1") - rho2.allocate(derham=derham, domain=domain) - rho2.spline.vector = derham.P["0"](rho2_pulled) - else: - rho1 = rho1_pulled - rho2 = rho2_pulled + # discrete right-hand sides + l2_proj = L2Projector("H1", mass_ops) + rho_vec1 = l2_proj.get_dofs(rho1, apply_bc=True) + rho_vec2 = l2_proj.get_dofs(rho2, apply_bc=True) # Create Poisson solvers - solver_params = SolverParameters( - tol=1.0e-13, - maxiter=3000, - info=True, - verbose=False, - recycle=False, - ) - - _phi1 = FEECVariable(space="H1") - _phi1.allocate(derham=derham, domain=domain) - - poisson_solver1 = ImplicitDiffusion() - poisson_solver1.variables.phi = _phi1 - - poisson_solver1.options = poisson_solver1.Options( - sigma_1=1e-8, - sigma_2=0.0, - sigma_3=1.0, - divide_by_dt=True, - diffusion_mat="M1perp", - rho=rho1, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, + _phi1 = derham.create_spline_function("test1", "H1") + poisson_solver1 = ImplicitDiffusion( + _phi1.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, diffusion_mat="M1perp", rho=rho_vec1, solver=solver_params ) - poisson_solver1.allocate() - - _phi2 = FEECVariable(space="H1") - _phi2.allocate(derham=derham, domain=domain) - - poisson_solver2 = ImplicitDiffusion() - poisson_solver2.variables.phi = _phi2 - - poisson_solver2.options = poisson_solver2.Options( - sigma_1=1e-8, - sigma_2=0.0, - sigma_3=1.0, - divide_by_dt=True, - diffusion_mat="M1perp", - rho=rho2, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, + _phi2 = derham.create_spline_function("test2", "H1") + poisson_solver2 = ImplicitDiffusion( + _phi2.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, diffusion_mat="M1perp", rho=rho_vec2, solver=solver_params ) - poisson_solver2.allocate() - # Solve Poisson equation (call propagator with dt=1.) 
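Regarding the small `sigma_1=1e-8` passed to both solvers above: with periodic or Neumann boundary conditions the weak Laplacian has the constants in its null space, and the `sigma_1` mass term shifts it into an invertible operator at the cost of an O(sigma_1) perturbation. A toy finite-difference illustration of the shift (independent of struphy's FEEC machinery):

```python
import numpy as np  # stands in for struphy's "xp as np" alias

n, h = 16, 1.0 / 16
# 1D Laplacian with periodic closure: constants are in the null space
L = (2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)) / h**2
L[0, -1] -= 1.0 / h**2
L[-1, 0] -= 1.0 / h**2
print(np.linalg.matrix_rank(L))    # n - 1: singular

sigma_1 = 1e-8
A = sigma_1 * np.eye(n) + L        # identity as a stand-in for the mass matrix
print(np.linalg.matrix_rank(A))    # n: solvable, perturbation O(sigma_1)
```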
dt = 1.0 poisson_solver1(dt) poisson_solver2(dt) # push numerical solutions - sol_val1 = domain.push(_phi1.spline, e1, e2, e3, kind="0") - sol_val2 = domain.push(_phi2.spline, e1, e2, e3, kind="0") + sol_val1 = domain.push(_phi1, e1, e2, e3, kind="0") + sol_val2 = domain.push(_phi2, e1, e2, e3, kind="0") x, y, z = domain(e1, e2, e3) analytic_value1 = sol1_xyz(x, y, z) analytic_value2 = sol2_xyz(x, y, z) # compute error - error1 = xp.max(xp.abs(analytic_value1 - sol_val1)) - error2 = xp.max(xp.abs(analytic_value2 - sol_val2)) + error1 = np.max(np.abs(analytic_value1 - sol_val1)) + error2 = np.max(np.abs(analytic_value2 - sol_val2)) - print(f"{p =}, {bc_type =}, {mapping =}") - print(f"{error1 =}") - print(f"{error2 =}") + print(f"{p = }, {bc_type = }, {mapping = }") + print(f"{error1 = }") + print(f"{error2 = }") print("") if show_plot and rank == 0: @@ -447,7 +386,7 @@ def rho2_pulled(e1, e2, e3): plt.show() assert error1 < 0.0044 - assert error2 < 0.023 + assert error2 < 0.021 @pytest.mark.skip(reason="Not clear if the 2.5d strategy is sound.") @@ -469,26 +408,35 @@ def test_poisson_M1perp_3d_compare_2p5d(Nel, p, mapping, show_plot=False): from time import time + solver_params = { + "type": ("pcg", "MassMatrixPreconditioner"), + "tol": 1.0e-13, + "maxiter": 3000, + "info": False, + "verbose": False, + "recycle": False, + } + # create domain object dom_type = mapping[0] dom_params = mapping[1] domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) + domain = domain_class(**dom_params) # boundary conditions spl_kind = [False, True, True] - dirichlet_bc = ((True, True), (False, False), (False, False)) + dirichlet_bc = [[True, True], [False, False], [False, False]] # evaluation grid - e1 = xp.linspace(0.0, 1.0, 50) - e2 = xp.linspace(0.0, 1.0, 60) - e3 = xp.linspace(0.0, 1.0, 30) + e1 = np.linspace(0.0, 1.0, 50) + e2 = np.linspace(0.0, 1.0, 60) + e3 = np.linspace(0.0, 1.0, 30) # solution and right-hand side on unit cube def rho(e1, e2, e3): - dd1 = xp.sin(xp.pi * e1) * xp.sin(4 * xp.pi * e2) * xp.cos(2 * xp.pi * e3) * (xp.pi) ** 2 - dd2 = xp.sin(xp.pi * e1) * xp.sin(4 * xp.pi * e2) * xp.cos(2 * xp.pi * e3) * (4 * xp.pi) ** 2 + dd1 = np.sin(np.pi * e1) * np.sin(4 * np.pi * e2) * np.cos(2 * np.pi * e3) * (np.pi) ** 2 + dd2 = np.sin(np.pi * e1) * np.sin(4 * np.pi * e2) * np.cos(2 * np.pi * e3) * (4 * np.pi) ** 2 return dd1 + dd2 # create 3d derham object @@ -504,47 +452,23 @@ def rho(e1, e2, e3): l2_proj = L2Projector("H1", mass_ops) rho_vec = l2_proj.get_dofs(rho, apply_bc=True) - print(f"{rho_vec[:].shape =}") + print(f"{rho_vec[:].shape = }") # Create 3d Poisson solver - solver_params = SolverParameters( - tol=1.0e-13, - maxiter=3000, - info=True, - verbose=False, - recycle=False, + _phi = derham.create_spline_function("test2", "H1") + _phi_2p5d = derham.create_spline_function("sol_2p5d", "H1") + poisson_solver_3d = ImplicitDiffusion( + _phi.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, diffusion_mat="M1perp", rho=rho_vec, solver=solver_params ) - _phi = FEECVariable(space="H1") - _phi.allocate(derham=derham, domain=domain) - - _phi_2p5d = FEECVariable(space="H1") - _phi_2p5d.allocate(derham=derham, domain=domain) - - poisson_solver_3d = ImplicitDiffusion() - poisson_solver_3d.variables.phi = _phi - - poisson_solver_3d.options = poisson_solver_3d.Options( - sigma_1=1e-8, - sigma_2=0.0, - sigma_3=1.0, - divide_by_dt=True, - diffusion_mat="M1perp", - rho=rho, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, - ) - - 
poisson_solver_3d.allocate() - - s = _phi.spline.starts - e = _phi.spline.ends + s = _phi.starts + e = _phi.ends # create 2.5d deRham object Nel_new = [Nel[0], Nel[1], 1] p[2] = 1 spl_kind[2] = True + dirichlet_bc[2] = [False, False] derham = Derham(Nel_new, p, spl_kind, dirichlet_bc=dirichlet_bc, comm=comm) mass_ops = WeightedMassOperators(derham, domain) @@ -552,26 +476,18 @@ def rho(e1, e2, e3): Propagator.derham = derham Propagator.mass_ops = mass_ops - _phi_small = FEECVariable(space="H1") - _phi_small.allocate(derham=derham, domain=domain) - - poisson_solver_2p5d = ImplicitDiffusion() - poisson_solver_2p5d.variables.phi = _phi_small - - poisson_solver_2p5d.options = poisson_solver_2p5d.Options( + _phi_small = derham.create_spline_function("test_small", "H1") + rhs = derham.create_spline_function("rhs", "H1") + poisson_solver_2p5d = ImplicitDiffusion( + _phi_small.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, - divide_by_dt=True, diffusion_mat="M1perp", - rho=rho, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, + rho=rhs.vector, + solver=solver_params, ) - poisson_solver_2p5d.allocate() - # Solve Poisson equation (call propagator with dt=1.) dt = 1.0 t0 = time() @@ -583,24 +499,26 @@ def rho(e1, e2, e3): t0 = time() t_inner = 0.0 for n in range(s[2], e[2] + 1): + # scale the rhs with Nel[2] !! + rhs.vector[s[0] : e[0] + 1, s[1] : e[1] + 1, 0] = rho_vec[s[0] : e[0] + 1, s[1] : e[1] + 1, n] * Nel[2] t0i = time() poisson_solver_2p5d(dt) t1i = time() t_inner += t1i - t0i - _tmp = _phi_small.spline.vector.copy() - _phi_2p5d.spline.vector[s[0] : e[0] + 1, s[1] : e[1] + 1, n] = _tmp[s[0] : e[0] + 1, s[1] : e[1] + 1, 0] + _tmp = _phi_small.vector.copy() + _phi_2p5d.vector[s[0] : e[0] + 1, s[1] : e[1] + 1, n] = _tmp[s[0] : e[0] + 1, s[1] : e[1] + 1, 0] t1 = time() print(f"rank {rank}, 2.5d pure solve time (without copy) = {t_inner}") print(f"rank {rank}, 2.5d solve time = {t1 - t0}") # push numerical solutions - sol_val = domain.push(_phi.spline, e1, e2, e3, kind="0") - sol_val_2p5d = domain.push(_phi_2p5d.spline, e1, e2, e3, kind="0") + sol_val = domain.push(_phi, e1, e2, e3, kind="0") + sol_val_2p5d = domain.push(_phi_2p5d, e1, e2, e3, kind="0") x, y, z = domain(e1, e2, e3) - print("max diff:", xp.max(xp.abs(sol_val - sol_val_2p5d))) - assert xp.max(xp.abs(sol_val - sol_val_2p5d)) < 0.026 + print("max diff:", np.max(np.abs(sol_val - sol_val_2p5d))) + assert np.max(np.abs(sol_val - sol_val_2p5d)) < 0.026 if show_plot and rank == 0: plt.figure("e1-e2 plane", figsize=(24, 16)) @@ -637,9 +555,9 @@ def rho(e1, e2, e3): if __name__ == "__main__": direction = 0 - bc_type = "dirichlet" + bc_type = "periodic" mapping = ["Cuboid", {"l1": 0.0, "r1": 4.0, "l2": 0.0, "r2": 2.0, "l3": 0.0, "r3": 3.0}] - mapping = ["Orthogonal", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 3.0}] + # mapping = ['Orthogonal', {'Lx': 4., 'Ly': 2., 'alpha': .1, 'Lz': 3.}] test_poisson_M1perp_1d(direction, bc_type, mapping, show_plot=True) # Nel = [64, 64, 1] @@ -651,5 +569,6 @@ def rho(e1, e2, e3): # Nel = [64, 64, 16] # p = [2, 2, 1] - # mapping = ["Cuboid", {"l1": 0.0, "r1": 1.0, "l2": 0.0, "r2": 1.0, "l3": 0.0, "r3": 1.0}] + # mapping = ['Cuboid', {'l1': 0., 'r1': 1., + # 'l2': 0., 'r2': 1., 'l3': 0., 'r3': 1.}] # test_poisson_M1perp_3d_compare_2p5d(Nel, p, mapping, show_plot=True) diff --git a/src/struphy/propagators/tests/test_poisson.py b/src/struphy/propagators/tests/test_poisson.py index 588aa2aa1..c68cf5041 100644 --- a/src/struphy/propagators/tests/test_poisson.py +++ 
b/src/struphy/propagators/tests/test_poisson.py @@ -1,4 +1,3 @@ -import cunumpy as xp import matplotlib.pyplot as plt import pytest from psydac.ddm.mpi import mpi as MPI @@ -7,23 +6,9 @@ from struphy.feec.projectors import L2Projector from struphy.feec.psydac_derham import Derham from struphy.geometry import domains -from struphy.geometry.base import Domain -from struphy.initial import perturbations -from struphy.kinetic_background.maxwellians import Maxwellian3D -from struphy.linear_algebra.solver import SolverParameters -from struphy.models.variables import FEECVariable -from struphy.pic.accumulation.accum_kernels import charge_density_0form -from struphy.pic.accumulation.particles_to_grid import AccumulatorVector -from struphy.pic.particles import Particles6D -from struphy.pic.utilities import ( - BinningPlot, - BoundaryParameters, - LoadingParameters, - WeightsParameters, -) +from struphy.propagators import ImplicitDiffusion from struphy.propagators.base import Propagator -from struphy.propagators.propagators_fields import ImplicitDiffusion, Poisson -from struphy.utils.pyccel import Pyccelkernel +from struphy.utils.arrays import xp as np comm = MPI.COMM_WORLD rank = comm.Get_rank() @@ -39,24 +24,26 @@ ["Orthogonal", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 3.0}], ], ) -@pytest.mark.parametrize("projected_rhs", [False, True]) -def test_poisson_1d( - direction: int, - bc_type: str, - mapping: list[str, dict], - projected_rhs: bool, - show_plot: bool = False, -): +def test_poisson_1d(direction, bc_type, mapping, show_plot=False): """ Test the convergence of Poisson solver in 1D by means of manufactured solutions. """ + solver_params = { + "type": ("pcg", "MassMatrixPreconditioner"), + "tol": 1.0e-13, + "maxiter": 3000, + "info": True, + "verbose": False, + "recycle": False, + } + # create domain object dom_type = mapping[0] dom_params = mapping[1] domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) + domain = domain_class(**dom_params) if dom_type == "Cuboid": Lx = dom_params["r1"] - dom_params["l1"] @@ -73,9 +60,9 @@ def test_poisson_1d( errors = [] h_vec = [] if show_plot: - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(24, 16)) for n, Neli in enumerate(Nels): # boundary conditions (overwritten below) @@ -89,77 +76,74 @@ def test_poisson_1d( if direction == 0: Nel = [Neli, 1, 1] p = [pi, 1, 1] - e1 = xp.linspace(0.0, 1.0, 50) + e1 = np.linspace(0.0, 1.0, 50) if bc_type == "neumann": spl_kind = [False, True, True] def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) + return np.cos(np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) * (xp.pi / Lx) ** 2 + return np.cos(np.pi / Lx * x) * (np.pi / Lx) ** 2 else: if bc_type == "dirichlet": spl_kind = [False, True, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) + return np.sin(2 * np.pi / Lx * x) def 
rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) * (2 * xp.pi / Lx) ** 2 + return np.sin(2 * np.pi / Lx * x) * (2 * np.pi / Lx) ** 2 elif direction == 1: Nel = [1, Neli, 1] p = [1, pi, 1] - e2 = xp.linspace(0.0, 1.0, 50) + e2 = np.linspace(0.0, 1.0, 50) if bc_type == "neumann": spl_kind = [True, False, True] def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Ly * y) + return np.cos(np.pi / Ly * y) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Ly * y) * (xp.pi / Ly) ** 2 + return np.cos(np.pi / Ly * y) * (np.pi / Ly) ** 2 else: if bc_type == "dirichlet": spl_kind = [True, False, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Ly * y) + return np.sin(2 * np.pi / Ly * y) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Ly * y) * (2 * xp.pi / Ly) ** 2 + return np.sin(2 * np.pi / Ly * y) * (2 * np.pi / Ly) ** 2 elif direction == 2: Nel = [1, 1, Neli] p = [1, 1, pi] - e3 = xp.linspace(0.0, 1.0, 50) + e3 = np.linspace(0.0, 1.0, 50) if bc_type == "neumann": spl_kind = [True, True, False] def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Lz * z) + return np.cos(np.pi / Lz * z) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Lz * z) * (xp.pi / Lz) ** 2 + return np.cos(np.pi / Lz * z) * (np.pi / Lz) ** 2 else: if bc_type == "dirichlet": spl_kind = [True, True, False] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lz * z) + return np.sin(2 * np.pi / Lz * z) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lz * z) * (2 * xp.pi / Lz) ** 2 + return np.sin(2 * np.pi / Lz * z) * (2 * np.pi / Lz) ** 2 else: print("Direction should be either 0, 1 or 2") @@ -174,55 +158,28 @@ def rho1_xyz(x, y, z): Propagator.mass_ops = mass_ops # pullbacks of right-hand side - def rho_pulled(e1, e2, e3): - return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=False) - - # define how to pass rho - if projected_rhs: - rho = FEECVariable(space="H1") - rho.allocate(derham=derham, domain=domain) - rho.spline.vector = derham.P["0"](rho_pulled) - else: - rho = rho_pulled - - # create Poisson solver - solver_params = SolverParameters( - tol=1.0e-13, - maxiter=3000, - info=True, - verbose=False, - recycle=False, - ) + def rho1(e1, e2, e3): + return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=True) - _phi = FEECVariable(space="H1") - _phi.allocate(derham=derham, domain=domain) + rho_vec = L2Projector("H1", mass_ops).get_dofs(rho1, apply_bc=True) - poisson_solver = Poisson() - poisson_solver.variables.phi = _phi - - poisson_solver.options = poisson_solver.Options( - stab_eps=1e-12, - # sigma_2=0.0, - # sigma_3=1.0, - rho=rho, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, + # create Poisson solver + _phi = derham.create_spline_function("phi", "H1") + poisson_solver = ImplicitDiffusion( + _phi.vector, sigma_1=1e-12, sigma_2=0.0, sigma_3=1.0, rho=rho_vec, solver=solver_params ) - poisson_solver.allocate() - # Solve Poisson (call propagator with dt=1.) 
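The assertion tightened by this patch from `pi + 1 - 0.07` to `pi + 1 - 0.06` checks the empirical convergence order, obtained as the negative slope of a log-log fit of error against `Nel`. A self-contained illustration of that check with synthetic third-order data (values assumed for the example):

```python
import numpy as np  # stands in for struphy's "xp as np" alias

Nels = np.array([8, 16, 32, 64, 128])
errors = 0.5 * Nels**-3.0                 # mimics a scheme of order p + 1 = 3
m, _ = np.polyfit(np.log(Nels), np.log(errors), deg=1)
print(f"estimated order: {-m:.3f}")       # ~3.000
assert -m > 3 - 0.06                      # same tolerance as the tests below
```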
dt = 1.0 poisson_solver(dt) # push numerical solution and compare - sol_val1 = domain.push(_phi.spline, e1, e2, e3, kind="0") + sol_val1 = domain.push(_phi, e1, e2, e3, kind="0") x, y, z = domain(e1, e2, e3) analytic_value1 = sol1_xyz(x, y, z) if show_plot: - plt.figure(f"degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}") + plt.figure(f"degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }") plt.subplot(2, 3, n + 1) if direction == 0: plt.plot(x[:, 0, 0], sol_val1[:, 0, 0], "ob", label="numerical") @@ -236,25 +193,24 @@ def rho_pulled(e1, e2, e3): plt.plot(z[0, 0, :], sol_val1[0, 0, :], "ob", label="numerical") plt.plot(z[0, 0, :], analytic_value1[0, 0, :], "r--", label="exact") plt.xlabel("z") - plt.title(f"{Nel =}") + plt.title(f"{Nel = }") plt.legend() - error = xp.max(xp.abs(analytic_value1 - sol_val1)) - print(f"{direction =}, {pi =}, {Neli =}, {error=}") + error = np.max(np.abs(analytic_value1 - sol_val1)) + print(f"{direction = }, {pi = }, {Neli = }, {error=}") errors.append(error) h = 1 / (Neli) h_vec.append(h) - m, _ = xp.polyfit(xp.log(Nels), xp.log(errors), deg=1) - print(f"For {pi =}, solution converges in {direction=} with rate {-m =} ") - assert -m > (pi + 1 - 0.07) + m, _ = np.polyfit(np.log(Nels), np.log(errors), deg=1) + print(f"For {pi = }, solution converges in {direction=} with rate {-m = } ") + assert -m > (pi + 1 - 0.06) # Plot convergence in 1D if show_plot: plt.figure( - f"Convergence for degree {pi =}, {direction + 1 =}, {bc_type =}, {mapping[0] =}", - figsize=(12, 8), + f"Convergence for degree {pi = }, {direction + 1 = }, {bc_type = }, {mapping[0] = }", figsize=(12, 8) ) plt.plot(h_vec, errors, "o", label=f"p={p[direction]}") plt.plot( @@ -267,169 +223,13 @@ def rho_pulled(e1, e2, e3): plt.xscale("log") plt.xlabel("Grid Spacing h") plt.ylabel("Error") - plt.title("Poisson solver") + plt.title(f"Poisson solver") plt.legend() if show_plot and rank == 0: plt.show() -@pytest.mark.parametrize( - "mapping", - [ - ["Cuboid", {"l1": 0.0, "r1": 4.0, "l2": 0.0, "r2": 2.0, "l3": 0.0, "r3": 3.0}], - # ["Orthogonal", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 3.0}], - ], -) -def test_poisson_accum_1d(mapping, do_plot=False): - """Pass accumulators as rhs.""" - # create domain object - dom_type = mapping[0] - dom_params = mapping[1] - - domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) - - if dom_type == "Cuboid": - Lx = dom_params["r1"] - dom_params["l1"] - else: - Lx = dom_params["Lx"] - - # create derham object - Nel = (16, 1, 1) - p = (2, 1, 1) - spl_kind = (True, True, True) - derham = Derham(Nel, p, spl_kind, comm=comm) - - # mass matrices - mass_ops = WeightedMassOperators(derham, domain) - - Propagator.derham = derham - Propagator.domain = domain - Propagator.mass_ops = mass_ops - - # 6D particle object - domain_array = derham.domain_array - nprocs = derham.domain_decomposition.nprocs - domain_decomp = (domain_array, nprocs) - - lp = LoadingParameters(ppc=4000, seed=765) - wp = WeightsParameters(control_variate=True) - bp = BoundaryParameters() - - backgr = Maxwellian3D(n=(1.0, None)) - l = 1 - amp = 1e-1 - pert = perturbations.ModesCos(ls=(l,), amps=(amp,)) - maxw = Maxwellian3D(n=(1.0, pert)) - - pert_exact = lambda x, y, z: amp * xp.cos(l * 2 * xp.pi / Lx * x) - phi_exact = lambda x, y, z: amp / (l * 2 * xp.pi / Lx) ** 2 * xp.cos(l * 2 * xp.pi / Lx * x) - e_exact = lambda x, y, z: amp / (l * 2 * xp.pi / Lx) * xp.sin(l * 2 * xp.pi / Lx * x) - - particles = Particles6D( - comm_world=comm, - 
domain_decomp=domain_decomp, - loading_params=lp, - weights_params=wp, - boundary_params=bp, - domain=domain, - background=backgr, - initial_condition=maxw, - ) - particles.draw_markers() - particles.initialize_weights() - - # particle to grid coupling - kernel = Pyccelkernel(charge_density_0form) - accum = AccumulatorVector(particles, "H1", kernel, mass_ops, domain.args_domain) - # accum() - # if do_plot: - # accum.show_accumulated_spline_field(mass_ops) - - rho = accum - - # create Poisson solver - solver_params = SolverParameters( - tol=1.0e-13, - maxiter=3000, - info=True, - verbose=False, - recycle=False, - ) - - _phi = FEECVariable(space="H1") - _phi.allocate(derham=derham, domain=domain) - - poisson_solver = Poisson() - poisson_solver.variables.phi = _phi - - poisson_solver.options = poisson_solver.Options( - stab_eps=1e-6, - # sigma_2=0.0, - # sigma_3=1.0, - rho=rho, - solver="pcg", - precond="MassMatrixPreconditioner", - solver_params=solver_params, - ) - - poisson_solver.allocate() - - # Solve Poisson (call propagator with dt=1.) - dt = 1.0 - poisson_solver(dt) - - # push numerical solution and compare - e1 = xp.linspace(0.0, 1.0, 50) - e2 = 0.0 - e3 = 0.0 - - num_values = domain.push(_phi.spline, e1, e2, e3, kind="0") - x, y, z = domain(e1, e2, e3) - pert_values = pert_exact(x, y, z) - analytic_values = phi_exact(x, y, z) - e_values = e_exact(x, y, z) - - _e = FEECVariable(space="Hcurl") - _e.allocate(derham=derham, domain=domain) - derham.grad.dot(-_phi.spline.vector, out=_e.spline.vector) - num_values_e = domain.push(_e.spline, e1, e2, e3, kind="1") - - if do_plot: - field = derham.create_spline_function("accum_field", "H1") - field.vector = accum.vectors[0] - accum_values = field(e1, e2, e3) - - plt.figure(figsize=(18, 12)) - plt.subplot(1, 3, 1) - plt.plot(x[:, 0, 0], num_values[:, 0, 0], "ob", label="numerical") - plt.plot(x[:, 0, 0], analytic_values[:, 0, 0], "r--", label="exact") - plt.xlabel("x") - plt.title("phi") - plt.legend() - plt.subplot(1, 3, 2) - plt.plot(x[:, 0, 0], accum_values[:, 0, 0], "ob", label="numerical, without L2-proj") - plt.plot(x[:, 0, 0], pert_values[:, 0, 0], "r--", label="exact") - plt.xlabel("x") - plt.title("rhs") - plt.legend() - plt.subplot(1, 3, 3) - plt.plot(x[:, 0, 0], num_values_e[0][:, 0, 0], "ob", label="numerical") - plt.plot(x[:, 0, 0], e_values[:, 0, 0], "r--", label="exact") - plt.xlabel("x") - plt.title("e_field") - plt.legend() - - plt.show() - - error = xp.max(xp.abs(num_values_e[0][:, 0, 0] - e_values[:, 0, 0])) / xp.max(xp.abs(e_values[:, 0, 0])) - print(f"{error=}") - - assert error < 0.0086 - - -@pytest.mark.mpi(min_size=2) @pytest.mark.parametrize("Nel", [[64, 64, 1]]) @pytest.mark.parametrize("p", [[1, 1, 1], [2, 2, 1]]) @pytest.mark.parametrize("bc_type", ["periodic", "dirichlet", "neumann"]) @@ -440,18 +240,25 @@ def test_poisson_accum_1d(mapping, do_plot=False): ["Colella", {"Lx": 4.0, "Ly": 2.0, "alpha": 0.1, "Lz": 1.0}], ], ) -@pytest.mark.parametrize("projected_rhs", [False, True]) -def test_poisson_2d(Nel, p, bc_type, mapping, projected_rhs, show_plot=False): +def test_poisson_2d(Nel, p, bc_type, mapping, show_plot=False): """ Test the Poisson solver by means of manufactured solutions in 2D . 
""" + solver_params = { + "type": ("pcg", "MassMatrixPreconditioner"), + "tol": 1.0e-13, + "maxiter": 3000, + "info": True, + "verbose": False, + "recycle": False, + } # create domain object dom_type = mapping[0] dom_params = mapping[1] domain_class = getattr(domains, dom_type) - domain: Domain = domain_class(**dom_params) + domain = domain_class(**dom_params) if dom_type == "Cuboid": Lx = dom_params["r1"] - dom_params["l1"] @@ -462,10 +269,10 @@ def test_poisson_2d(Nel, p, bc_type, mapping, projected_rhs, show_plot=False): # manufactured solution in 1D (overwritten for "neumann") def sol1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) + return np.sin(2 * np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.sin(2 * xp.pi / Lx * x) * (2 * xp.pi / Lx) ** 2 + return np.sin(2 * np.pi / Lx * x) * (2 * np.pi / Lx) ** 2 # boundary conditions dirichlet_bc = None @@ -475,26 +282,25 @@ def rho1_xyz(x, y, z): # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.sin(2 * xp.pi * x / Lx + 4 * xp.pi / Ly * y) + return np.sin(2 * np.pi * x / Lx + 4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.sin(2 * xp.pi / Lx * x + 4 * xp.pi / Ly * y) * (2 * xp.pi / Lx) ** 2 - ddy = xp.sin(2 * xp.pi / Lx * x + 4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.sin(2 * np.pi / Lx * x + 4 * np.pi / Ly * y) * (2 * np.pi / Lx) ** 2 + ddy = np.sin(2 * np.pi / Lx * x + 4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy elif bc_type == "dirichlet": spl_kind = [False, True, True] - dirichlet_bc = [(not kd,) * 2 for kd in spl_kind] - dirichlet_bc = tuple(dirichlet_bc) - print(f"{dirichlet_bc =}") + dirichlet_bc = [[not kd] * 2 for kd in spl_kind] + print(f"{dirichlet_bc = }") # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) + return np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (xp.pi / Lx) ** 2 - ddy = xp.sin(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (np.pi / Lx) ** 2 + ddy = np.sin(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy elif bc_type == "neumann": @@ -502,19 +308,19 @@ def rho2_xyz(x, y, z): # manufactured solution in 2D def sol2_xyz(x, y, z): - return xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) + return np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) def rho2_xyz(x, y, z): - ddx = xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (xp.pi / Lx) ** 2 - ddy = xp.cos(xp.pi * x / Lx) * xp.sin(4 * xp.pi / Ly * y) * (4 * xp.pi / Ly) ** 2 + ddx = np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (np.pi / Lx) ** 2 + ddy = np.cos(np.pi * x / Lx) * np.sin(4 * np.pi / Ly * y) * (4 * np.pi / Ly) ** 2 return ddx + ddy # manufactured solution in 1D def sol1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) + return np.cos(np.pi / Lx * x) def rho1_xyz(x, y, z): - return xp.cos(xp.pi / Lx * x) * (xp.pi / Lx) ** 2 + return np.cos(np.pi / Lx * x) * (np.pi / Lx) ** 2 # create derham object derham = Derham(Nel, p, spl_kind, dirichlet_bc=dirichlet_bc, comm=comm) @@ -527,111 +333,53 @@ def rho1_xyz(x, y, z): Propagator.mass_ops = mass_ops # evaluation grid - e1 = xp.linspace(0.0, 1.0, 50) - e2 = xp.linspace(0.0, 1.0, 50) - e3 = xp.linspace(0.0, 1.0, 1) + e1 = np.linspace(0.0, 1.0, 50) + e2 = np.linspace(0.0, 1.0, 50) + e3 = np.linspace(0.0, 1.0, 1) # pullbacks of right-hand side - def rho1_pulled(e1, e2, e3): - 
     # pullbacks of right-hand side
-    def rho1_pulled(e1, e2, e3):
-        return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=False)
-
-    def rho2_pulled(e1, e2, e3):
-        return domain.pull(rho2_xyz, e1, e2, e3, kind="0", squeeze_out=False)
+    def rho1(e1, e2, e3):
+        return domain.pull(rho1_xyz, e1, e2, e3, kind="0", squeeze_out=True)

-    # how to pass right-hand sides
-    if projected_rhs:
-        rho1 = FEECVariable(space="H1")
-        rho1.allocate(derham=derham, domain=domain)
-        rho1.spline.vector = derham.P["0"](rho1_pulled)
+    def rho2(e1, e2, e3):
+        return domain.pull(rho2_xyz, e1, e2, e3, kind="0", squeeze_out=True)

-        rho2 = FEECVariable(space="H1")
-        rho2.allocate(derham=derham, domain=domain)
-        rho2.spline.vector = derham.P["0"](rho2_pulled)
-    else:
-        rho1 = rho1_pulled
-        rho2 = rho2_pulled
+    # discrete right-hand sides
+    l2_proj = L2Projector("H1", mass_ops)
+    rho_vec1 = l2_proj.get_dofs(rho1, apply_bc=True)
+    rho_vec2 = l2_proj.get_dofs(rho2, apply_bc=True)

     # Create Poisson solvers
-    solver_params = SolverParameters(
-        tol=1.0e-13,
-        maxiter=3000,
-        info=True,
-        verbose=False,
-        recycle=False,
-    )
-
-    _phi1 = FEECVariable(space="H1")
-    _phi1.allocate(derham=derham, domain=domain)
-
-    poisson_solver1 = Poisson()
-    poisson_solver1.variables.phi = _phi1
-
-    poisson_solver1.options = poisson_solver1.Options(
-        stab_eps=1e-8,
-        # sigma_2=0.0,
-        # sigma_3=1.0,
-        rho=rho1,
-        solver="pcg",
-        precond="MassMatrixPreconditioner",
-        solver_params=solver_params,
+    _phi1 = derham.create_spline_function("test1", "H1")
+    poisson_solver1 = ImplicitDiffusion(
+        _phi1.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, rho=rho_vec1, solver=solver_params
     )
-
-    poisson_solver1.allocate()
-
-    # _phi1 = derham.create_spline_function("test1", "H1")
-    # poisson_solver1 = Poisson(
-    #     _phi1.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, rho=rho_vec1, solver=solver_params
-    # )
-
-    _phi2 = FEECVariable(space="H1")
-    _phi2.allocate(derham=derham, domain=domain)
-
-    poisson_solver2 = Poisson()
-    poisson_solver2.variables.phi = _phi2
-
-    stab_eps = 1e-8
-    err_lim = 0.03
-    if bc_type == "neumann" and dom_type == "Colella":
-        stab_eps = 1e-4
-        err_lim = 0.046
-
-    poisson_solver2.options = poisson_solver2.Options(
-        stab_eps=stab_eps,
-        # sigma_2=0.0,
-        # sigma_3=1.0,
-        rho=rho2,
-        solver="pcg",
-        precond="MassMatrixPreconditioner",
-        solver_params=solver_params,
+    _phi2 = derham.create_spline_function("test2", "H1")
+    poisson_solver2 = ImplicitDiffusion(
+        _phi2.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, rho=rho_vec2, solver=solver_params
     )
-
-    poisson_solver2.allocate()
-
-    # _phi2 = derham.create_spline_function("test2", "H1")
-    # poisson_solver2 = Poisson(
-    #     _phi2.vector, sigma_1=1e-8, sigma_2=0.0, sigma_3=1.0, rho=rho_vec2, solver=solver_params
-    # )
-
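Note: as a struphy-independent sanity check of the same 1D periodic problem the test solves (rho1_xyz along x), a spectral solve reproduces the manufactured solution to machine precision; all names below are local to this sketch:

import numpy as np

Lx, N = 4.0, 64
x = np.linspace(0.0, Lx, N, endpoint=False)
rho = np.sin(2 * np.pi / Lx * x) * (2 * np.pi / Lx) ** 2  # rho1_xyz along x

# -phi'' = rho  =>  k^2 * phi_hat = rho_hat
k = 2 * np.pi * np.fft.fftfreq(N, d=Lx / N)
rho_hat = np.fft.fft(rho)
phi_hat = np.zeros_like(rho_hat)
phi_hat[1:] = rho_hat[1:] / k[1:] ** 2  # k = 0 mode fixed to zero (zero-mean gauge)
phi = np.fft.ifft(phi_hat).real

assert np.max(np.abs(phi - np.sin(2 * np.pi / Lx * x))) < 1e-12  # exact for a single mode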
     # Solve Poisson equation (call propagator with dt=1.)
     dt = 1.0
     poisson_solver1(dt)
     poisson_solver2(dt)

     # push numerical solutions
-    sol_val1 = domain.push(_phi1.spline, e1, e2, e3, kind="0")
-    sol_val2 = domain.push(_phi2.spline, e1, e2, e3, kind="0")
+    sol_val1 = domain.push(_phi1, e1, e2, e3, kind="0")
+    sol_val2 = domain.push(_phi2, e1, e2, e3, kind="0")

     x, y, z = domain(e1, e2, e3)

     analytic_value1 = sol1_xyz(x, y, z)
     analytic_value2 = sol2_xyz(x, y, z)

     # compute error
-    error1 = xp.max(xp.abs(analytic_value1 - sol_val1))
-    error2 = xp.max(xp.abs(analytic_value2 - sol_val2))
+    error1 = np.max(np.abs(analytic_value1 - sol_val1))
+    error2 = np.max(np.abs(analytic_value2 - sol_val2))

-    print(f"{p =}, {bc_type =}, {mapping =}")
-    print(f"{error1 =}")
-    print(f"{error2 =}")
+    print(f"{p = }, {bc_type = }, {mapping = }")
+    print(f"{error1 = }")
+    print(f"{error2 = }")
     print("")

     if show_plot and rank == 0:
@@ -659,23 +407,21 @@ def rho2_pulled(e1, e2, e3):
     if p[0] == 1 and bc_type == "neumann" and mapping[0] == "Colella":
         pass
     else:
-        assert error1 < 0.0053
-        assert error2 < err_lim
+        assert error1 < 0.0044
+        assert error2 < 0.021


 if __name__ == "__main__":
-    # direction = 0
-    # bc_type = "dirichlet"
+    direction = 0
+    bc_type = "dirichlet"
     mapping = ["Cuboid", {"l1": 0.0, "r1": 4.0, "l2": 0.0, "r2": 2.0, "l3": 0.0, "r3": 3.0}]
     # mapping = ['Orthogonal', {'Lx': 4., 'Ly': 2., 'alpha': .1, 'Lz': 3.}]
-    # test_poisson_1d(direction, bc_type, mapping, projected_rhs=True, show_plot=True)
+    test_poisson_1d(direction, bc_type, mapping, show_plot=True)

     # Nel = [64, 64, 1]
     # p = [2, 2, 1]
     # bc_type = 'neumann'
-    # # mapping = ['Cuboid', {'l1': 0., 'r1': 4., 'l2': 0., 'r2': 2., 'l3': 0., 'r3': 3.}]
-    # # mapping = ['Orthogonal', {'Lx': 4., 'Ly': 2., 'alpha': .1, 'Lz': 1.}]
+    # #mapping = ['Cuboid', {'l1': 0., 'r1': 4., 'l2': 0., 'r2': 2., 'l3': 0., 'r3': 3.}]
+    # #mapping = ['Orthogonal', {'Lx': 4., 'Ly': 2., 'alpha': .1, 'Lz': 1.}]
     # mapping = ['Colella', {'Lx': 4., 'Ly': 2., 'alpha': .1, 'Lz': 1.}]
-    # test_poisson_2d(Nel, p, bc_type, mapping, projected_rhs=True, show_plot=True)
-
-    test_poisson_accum_1d(mapping, do_plot=True)
+    # test_poisson_2d(Nel, p, bc_type, mapping, show_plot=True)
diff --git a/src/struphy/psydac-2.4.5.dev0-py3-none-any.whl b/src/struphy/psydac-2.4.5.dev0-py3-none-any.whl
new file mode 100644
index 000000000..71dcd07a0
Binary files /dev/null and b/src/struphy/psydac-2.4.5.dev0-py3-none-any.whl differ
diff --git a/src/struphy/tutorials/tests/test_tutorials.py b/src/struphy/tutorials/tests/test_tutorials.py
new file mode 100644
index 000000000..c8de4d5c8
--- /dev/null
+++ b/src/struphy/tutorials/tests/test_tutorials.py
@@ -0,0 +1,172 @@
+import os
+
+import pytest
+from psydac.ddm.mpi import mpi as MPI
+
+import struphy
+from struphy.main import main
+from struphy.post_processing import pproc_struphy
+
+comm = MPI.COMM_WORLD
+rank = comm.Get_rank()
+
+libpath = struphy.__path__[0]
+i_path = os.path.join(libpath, "io", "inp")
+o_path = os.path.join(libpath, "io", "out")
+
+
+def test_tutorial_02():
+    main(
+        "LinearMHDVlasovCC",
+        os.path.join(i_path, "tutorials", "params_02.yml"),
+        os.path.join(o_path, "tutorial_02"),
+        supress_out=True,
+    )
+
+
+def test_tutorial_03():
+    main(
+        "LinearMHD",
+        os.path.join(i_path, "tutorials", "params_03.yml"),
+        os.path.join(o_path, "tutorial_03"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_03"), physical=True)
+
+
+def test_tutorial_04(fast):
+    main(
+        "Maxwell",
+        os.path.join(i_path, "tutorials", "params_04a.yml"),
+        os.path.join(o_path, "tutorial_04a"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_04a"))
+
+    main(
+        "LinearMHD",
+        os.path.join(i_path, "tutorials", "params_04b.yml"),
+        os.path.join(o_path, "tutorial_04b"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_04b"))
+
+    if not fast:
+        main(
+            "VariationalMHD",
+            os.path.join(i_path, "tutorials", "params_04c.yml"),
+            os.path.join(o_path, "tutorial_04c"),
+            supress_out=True,
+        )
+
+        comm.Barrier()
+        if rank == 0:
+            pproc_struphy.main(os.path.join(o_path, "tutorial_04c"))
+
+
+def test_tutorial_05():
+    main(
+        "Vlasov",
+        os.path.join(i_path, "tutorials", "params_05a.yml"),
+        os.path.join(o_path, "tutorial_05a"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05a"))
+
+    main(
+        "Vlasov",
+        os.path.join(i_path, "tutorials", "params_05b.yml"),
+        os.path.join(o_path, "tutorial_05b"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05b"))
+
+    main(
+        "GuidingCenter",
+        os.path.join(i_path, "tutorials", "params_05c.yml"),
+        os.path.join(o_path, "tutorial_05c"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05c"))
+
+    main(
+        "GuidingCenter",
+        os.path.join(i_path, "tutorials", "params_05d.yml"),
+        os.path.join(o_path, "tutorial_05d"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05d"))
+
+    main(
+        "GuidingCenter",
+        os.path.join(i_path, "tutorials", "params_05e.yml"),
+        os.path.join(o_path, "tutorial_05e"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05e"))
+
+    main(
+        "GuidingCenter",
+        os.path.join(i_path, "tutorials", "params_05f.yml"),
+        os.path.join(o_path, "tutorial_05f"),
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_05f"))
+
+
+def test_tutorial_12():
+    main(
+        "Vlasov",
+        os.path.join(i_path, "tutorials", "params_12a.yml"),
+        os.path.join(o_path, "tutorial_12a"),
+        save_step=100,
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_12a"))
+
+    main(
+        "GuidingCenter",
+        os.path.join(i_path, "tutorials", "params_12b.yml"),
+        os.path.join(o_path, "tutorial_12b"),
+        save_step=10,
+        supress_out=True,
+    )
+
+    comm.Barrier()
+    if rank == 0:
+        pproc_struphy.main(os.path.join(o_path, "tutorial_12b"))
+
+
+if __name__ == "__main__":
+    test_tutorial_04(True)
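Note: test_tutorial_04 takes a fast argument that is not defined in this file; presumably it is provided as a pytest fixture from a conftest.py outside this diff. A minimal sketch of such a fixture (the --fast option name and file are assumptions, not part of this patch):

# conftest.py (hypothetical)
import pytest


def pytest_addoption(parser):
    # assumed CLI flag to skip the slower tutorial runs
    parser.addoption("--fast", action="store_true", default=False)


@pytest.fixture
def fast(request):
    return request.config.getoption("--fast")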
diff --git a/src/struphy/utils/arrays.py b/src/struphy/utils/arrays.py
new file mode 100644
index 000000000..ae2b9ef82
--- /dev/null
+++ b/src/struphy/utils/arrays.py
@@ -0,0 +1,59 @@
+import os
+from types import ModuleType
+from typing import TYPE_CHECKING, Literal
+
+BackendType = Literal["numpy", "cupy"]
+
+
+class ArrayBackend:
+    def __init__(
+        self,
+        backend: BackendType = "numpy",
+        verbose: bool = False,
+    ) -> None:
+        assert backend.lower() in ["numpy", "cupy"], "Array backend must be either 'numpy' or 'cupy'."
+
+        self._backend: BackendType = "cupy" if backend.lower() == "cupy" else "numpy"
+
+        # Import numpy/cupy
+        if self.backend == "cupy":
+            try:
+                import cupy as cp
+
+                self._xp = cp
+            except ImportError:
+                if verbose:
+                    print("CuPy not available.")
+                self._backend = "numpy"
+
+        if self.backend == "numpy":
+            import numpy as np
+
+            self._xp = np
+
+        assert isinstance(self.xp, ModuleType)
+
+        if verbose:
+            print(f"Using {self.xp.__name__} backend.")
+
+    @property
+    def backend(self) -> BackendType:
+        return self._backend
+
+    @property
+    def xp(self) -> ModuleType:
+        return self._xp
+
+
+# TODO: Make this configurable via environment variable or config file.
+array_backend = ArrayBackend(
+    backend="cupy" if os.getenv("ARRAY_BACKEND", "numpy").lower() == "cupy" else "numpy",
+    verbose=True,
+)
+
+# TYPE_CHECKING is True when type checking (e.g., mypy), but False at runtime.
+# This allows us to use autocompletion for xp (i.e., numpy/cupy) as if numpy was imported.
+if TYPE_CHECKING:
+    import numpy as xp
+else:
+    xp = array_backend.xp
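Note: the rest of this patch consumes the alias via "from struphy.utils.arrays import xp as np", so call sites keep NumPy spelling while the backing module may be CuPy. A minimal usage sketch (ARRAY_BACKEND is read once, at first import of the module):

import os

# select the backend before the first struphy import
os.environ["ARRAY_BACKEND"] = "cupy"  # silently falls back to numpy if cupy is missing

from struphy.utils.arrays import xp as np  # numpy-compatible namespace

a = np.zeros((3, 3), dtype=float)  # numpy or cupy array, depending on backend
print(type(a))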
""" @@ -121,10 +122,10 @@ def get_Np_global(self, species_name): if "Np" in markers: return markers["Np"] elif "ppc" in markers: - n_cells = xp.prod(self.params["grid"]["Nel"], dtype=int) + n_cells = np.prod(self.params["grid"]["Nel"], dtype=int) return int(markers["ppc"] * n_cells) elif "ppb" in markers: - n_boxes = xp.prod(species["boxes_per_dim"], dtype=int) * self.num_clones + n_boxes = np.prod(species["boxes_per_dim"], dtype=int) * self.num_clones return int(markers["ppb"] * n_boxes) def print_clone_config(self): @@ -209,7 +210,7 @@ def print_particle_config(self): row = f"{i_clone:6} " # Np = self.params["kinetic"][species_name]["markers"]["Np"] Np = self.get_Np_global(species_name) - n_cells_clone = xp.prod(self.params["grid"]["Nel"]) + n_cells_clone = np.prod(self.params["grid"]["Nel"]) Np_clone = self.get_Np_clone(Np, clone_id=i_clone) ppc_clone = Np_clone / n_cells_clone @@ -230,7 +231,7 @@ def print_particle_config(self): if marker_key in self.params["kinetic"][species_name]["markers"].keys(): params_value = self.params["kinetic"][species_name]["markers"][marker_key] if params_value is not None: - assert sum_value == params_value, f"{sum_value =} and {params_value =}" + assert sum_value == params_value, f"{sum_value = } and {params_value = }" sum_row += f"| {str(sum_value):30} " # Print the final message diff --git a/src/struphy/utils/cupy_vs_numpy.py b/src/struphy/utils/cupy_vs_numpy.py index 43f214d73..d32044a58 100644 --- a/src/struphy/utils/cupy_vs_numpy.py +++ b/src/struphy/utils/cupy_vs_numpy.py @@ -1,6 +1,6 @@ import time -import cunumpy as xp +from arrays import xp def main(N=8192): diff --git a/src/struphy/utils/mpi.py b/src/struphy/utils/mpi.py new file mode 100644 index 000000000..bb4eb10d4 --- /dev/null +++ b/src/struphy/utils/mpi.py @@ -0,0 +1,102 @@ +from dataclasses import dataclass +from time import time +from typing import TYPE_CHECKING + + +# Might not be needed +class MPICommWrapper: + def __init__(self, use_mpi=True): + self.use_mpi = use_mpi + if use_mpi: + from mpi4py import MPI + + self.comm = MPI.COMM_WORLD + else: + self.comm = MockComm() + + def __getattr__(self, name): + return getattr(self.comm, name) + + +class MockComm: + def __getattr__(self, name): + # Return a function that does nothing and returns None + def dummy(*args, **kwargs): + return None + + return dummy + + # Override some functions + def Get_rank(self): + return 0 + + def Get_size(self): + return 1 + + def Barrier(self): + return + + +class MPIwrapper: + def __init__(self, use_mpi: bool = False): + self.use_mpi = use_mpi + if use_mpi: + from mpi4py import MPI + + self._MPI = MPI + print("MPI is enabled") + else: + self._MPI = MockMPI() + print("MPI is NOT enabled") + + @property + def MPI(self): + return self._MPI + + +class MockMPI: + def __getattr__(self, name): + # Return a function that does nothing and returns None + def dummy(*args, **kwargs): + return None + + return dummy + + # Override some functions + @property + def COMM_WORLD(self): + return MockComm() + + # def comm_Get_rank(self): + # return 0 + + # def comm_Get_size(self): + # return 1 + + +try: + from mpi4py import MPI + + _comm = MPI.COMM_WORLD + rank = _comm.Get_rank() + size = _comm.Get_size() + mpi_enabled = size > 1 +except ImportError: + # mpi4py not installed + mpi_enabled = False +except Exception: + # mpi4py installed but not running under mpirun + mpi_enabled = False + +# TODO: add environment variable for mpi use +mpi_wrapper = MPIwrapper(use_mpi=mpi_enabled) + +# TYPE_CHECKING is True when type checking 
diff --git a/src/struphy/utils/set_release_dependencies.py b/src/struphy/utils/set_release_dependencies.py
new file mode 100644
index 000000000..a08717060
--- /dev/null
+++ b/src/struphy/utils/set_release_dependencies.py
@@ -0,0 +1,76 @@
+import importlib.metadata
+import re
+
+import tomli_w
+import tomllib
+
+
+def get_min_bound(entry):
+    match = re.search(r"(>=|==|~=|>)\s*([\w\.\-]+)", entry)
+    if match:
+        op, version = match.groups()
+        return f"{op}{version}"
+    return None
+
+
+def get_max_bound(entry):
+    match = re.search(r"(<=|<)\s*([\w\.\-]+)", entry)
+    if match:
+        op, version = match.groups()
+        return f"{op}{version}"
+    return None
+
+
+def get_package_name(entry):
+    return re.split(r"[<>=~]", entry.strip())[0].replace(" ", "")
+
+
+def generate_updated_entry(package_name, package_deps):
+    ver_def = package_name
+    # Always set max version to the currently installed version
+    ver_def += f"<={package_deps['installed']}"
+
+    if package_deps["min"]:
+        ver_def += f", {package_deps['min']}"
+    return ver_def
+
+
+def update_dependencies(dependencies):
+    for i, entry in enumerate(dependencies):
+        package_name = get_package_name(entry)
+
+        try:
+            installed_version = importlib.metadata.version(package_name)
+
+            package_deps = {"installed": installed_version, "min": get_min_bound(entry), "max": get_max_bound(entry)}
+
+            if package_deps["installed"]:
+                dependencies[i] = generate_updated_entry(package_name, package_deps)
+
+        except importlib.metadata.PackageNotFoundError:
+            print(f"Warning: {package_name} not installed, skipping...")
+            continue
+
+    # Remove psydac from the dependencies (rebuild the list instead of popping while iterating)
+    dependencies[:] = [entry for entry in dependencies if "psydac" not in entry]
+
+
+def main():
+    with open("pyproject.toml", "rb") as f:
+        pyproject_data = tomllib.load(f)
+
+    mandatory_dependencies = pyproject_data["project"]["dependencies"]
+    optional_dependency_groups = pyproject_data["project"]["optional-dependencies"]
+
+    update_dependencies(mandatory_dependencies)
+    for group_deps in optional_dependency_groups.values():
+        update_dependencies(group_deps)
+
+    with open("pyproject.toml", "wb") as f:
+        tomli_w.dump(pyproject_data, f)
+
+
+if __name__ == "__main__":
+    main()
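Note: what the pinning produces for a typical entry, using the helpers above; the installed version "1.26.4" is an illustrative assumption, not a requirement of this patch:

from struphy.utils.set_release_dependencies import get_min_bound, get_package_name, generate_updated_entry

entry = "numpy >= 1.20"
package_deps = {"installed": "1.26.4", "min": get_min_bound(entry), "max": None}
print(generate_updated_entry(get_package_name(entry), package_deps))
# -> numpy<=1.26.4, >=1.20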
"(arrays not displayed)" print(k.ljust(26), v) -def dict_to_yaml(dictionary: dict, output: str): - """Write dictionary to file and save in output.""" +def dict_to_yaml(dictionary, output): with open(output, "w") as file: yaml.dump( dictionary, @@ -111,15 +110,15 @@ def refresh_models(): list_fluid = [] fluid_string = "" for name, obj in inspect.getmembers(fluid): - if inspect.isclass(obj) and obj.__module__ == fluid.__name__: - # if name not in {"StruphyModel", "Propagator"}: - list_fluid += [name] - fluid_string += '"' + name + '"\n' + if inspect.isclass(obj): + if name not in {"StruphyModel", "Propagator"}: + list_fluid += [name] + fluid_string += '"' + name + '"\n' list_kinetic = [] kinetic_string = "" for name, obj in inspect.getmembers(kinetic): - if inspect.isclass(obj) and obj.__module__ == kinetic.__name__: + if inspect.isclass(obj): if name not in {"StruphyModel", "Propagator"}: list_kinetic += [name] kinetic_string += '"' + name + '"\n' @@ -127,7 +126,7 @@ def refresh_models(): list_hybrid = [] hybrid_string = "" for name, obj in inspect.getmembers(hybrid): - if inspect.isclass(obj) and obj.__module__ == hybrid.__name__: + if inspect.isclass(obj): if name not in {"StruphyModel", "Propagator"}: list_hybrid += [name] hybrid_string += '"' + name + '"\n' @@ -135,7 +134,7 @@ def refresh_models(): list_toy = [] toy_string = "" for name, obj in inspect.getmembers(toy): - if inspect.isclass(obj) and obj.__module__ == toy.__name__: + if inspect.isclass(obj): if name not in {"StruphyModel", "Propagator"}: list_toy += [name] toy_string += '"' + name + '"\n' @@ -203,6 +202,6 @@ def subp_run(cmd, cwd="libpath", check=True): for k, val in state.items(): print(k, val) i_path, o_path, b_path = get_paths(state) - print(f"{i_path =}") - print(f"{o_path =}") - print(f"{b_path =}") + print(f"{i_path = }") + print(f"{o_path = }") + print(f"{b_path = }")