diff --git a/Project.toml b/Project.toml index 48fa461f..14fb2702 100644 --- a/Project.toml +++ b/Project.toml @@ -4,13 +4,16 @@ version = "0.2.0" [deps] Bessels = "0e736298-9ec6-45e8-9647-e4fc86a2fe38" +ClassicalOrthogonalPolynomials = "b30e2e7b-c4ee-47da-9d5f-2c5c27239acd" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" ElementaryPDESolutions = "88a69b33-da0f-4502-8c8c-d680cf4d883b" +FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" HAdaptiveIntegration = "eaa5ad34-b243-48e9-b09c-54bc0655cecf" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" NearestNeighbors = "b8a86587-4115-5ab1-83bc-aa920d37bbce" +OffsetArrays = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" @@ -18,6 +21,7 @@ Richardson = "708f8203-808e-40c0-ba2d-98a6953ed40d" Scratch = "6c6a2e73-6563-6170-7368-637461726353" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" +SpecialMatrices = "928aab9d-ef52-54ac-8ca1-acd7ca42c160" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" @@ -42,11 +46,13 @@ IntiQPGreenExt = "QPGreen" [compat] Bessels = "0.2" +ClassicalOrthogonalPolynomials = "0.15.7" DataStructures = "0.18 - 0.19" ElementaryPDESolutions = "0.2 - 0.3" FMM2D = "0.2" FMM3D = "1" FMMLIB2D = "0.3" +FastGaussQuadrature = "1.1.0" ForwardDiff = "0.10, 1" Gmsh = "0.3" HAdaptiveIntegration = "0.2" @@ -56,6 +62,7 @@ LinearMaps = "3" Makie = "0.21 - 0.24" Meshes = "0.42 - 0.55" NearestNeighbors = "0.4" +OffsetArrays = "1.17.0" Pkg = "1" Printf = "1" QPGreen = "1.0.1" @@ -64,6 +71,7 @@ Richardson = "1" Scratch = "1" SparseArrays = "1" SpecialFunctions = "2" +SpecialMatrices = "3.1.0" StaticArrays = "1" TOML = "1" julia = "1.9" diff --git a/docs/src/refs.bib b/docs/src/refs.bib index 09c85d9d..347fe888 100644 --- a/docs/src/refs.bib +++ b/docs/src/refs.bib @@ -143,3 +143,36 @@ @article{bernardi1989optimal year={1989}, publisher={SIAM} } + +@article{helsing2008evaluation, + title={On the evaluation of layer potentials close to their sources}, + author={Helsing, Johan and Ojala, Rikard}, + journal={Journal of Computational Physics}, + volume={227}, + number={5}, + pages={2899--2921}, + year={2008}, + publisher={Elsevier} +} + +@article{helsing2015variants, + title={Variants of an explicit kernel-split panel-based Nystr{\"o}m discretization scheme for Helmholtz boundary value problems}, + author={Helsing, Johan and Holst, Anders}, + journal={Advances in Computational Mathematics}, + volume={41}, + number={3}, + pages={691--708}, + year={2015}, + publisher={Springer} +} + +@article{fryklund2022adaptive, + title={An adaptive kernel-split quadrature method for parameter-dependent layer potentials}, + author={Fryklund, Fredrik and Klinteberg, Ludvig af and Tornberg, Anna-Karin}, + journal={Advances in Computational Mathematics}, + volume={48}, + number={2}, + pages={12}, + year={2022}, + publisher={Springer} +} \ No newline at end of file diff --git a/src/Inti.jl b/src/Inti.jl index 7df4521f..f4ae89d7 100644 --- a/src/Inti.jl +++ b/src/Inti.jl @@ -50,6 +50,9 @@ include("nystrom.jl") include("adaptive_correction.jl") include("bdim.jl") include("vdim.jl") +# include("kernel_split.jl") +# include("adaptive_kernel_split.jl") +include("ksplit.jl") # some zero-argument methods for the 
Inti's gmsh extension
include("gmsh_api.jl")
diff --git a/src/api.jl b/src/api.jl
index 26cbccf1..8776ecf7 100644
--- a/src/api.jl
+++ b/src/api.jl
@@ -11,7 +11,7 @@ const COMPRESSION_METHODS = [:none, :hmatrix, :fmm]
 Available correction methods for the singular and nearly-singular integrals in
 [`Inti`](@ref).
 """
-const CORRECTION_METHODS = [:none, :dim, :adaptive]
+const CORRECTION_METHODS = [:none, :dim, :adaptive, :ksplit, :ksplit_adaptive]
 
 """
     single_double_layer(; op, target, source::Quadrature, compression,
@@ -163,6 +163,108 @@ function single_double_layer(;
         correction_kw = Base.structdiff(correction, NamedTuple{(:method,)})
         δS = adaptive_correction(Sop; correction_kw...)
         δD = adaptive_correction(Dop; correction_kw...)
+    elseif correction.method == :ksplit
+        # Extract the required geometric args from the correction tuple
+        geo_args = (
+            source_quad_connectivity = correction.connectivity,
+            source_el = correction.elements,
+            velocity_fn = correction.velocity_fn,
+            curvature_fn = correction.curvature_fn,
+            boundary_inv = correction.boundary_inv,
+        )
+
+        # Extract the ksplit-specific optional keywords
+        ksplit_kw_names =
+            (:n_panel_corr, :maxdist, :target_location, :parametric_length)
+        ksplit_kwargs = filter(kv -> kv[1] in ksplit_kw_names, pairs(correction))
+
+        if target === source
+            if !haskey(ksplit_kwargs, :n_panel_corr)
+                error(
+                    "For :ksplit correction with `target === source`, you must provide `n_panel_corr` (e.g., `correction = (method=:ksplit, n_panel_corr=3, ...)`).",
+                )
+            end
+        else # target !== source
+            if !haskey(ksplit_kwargs, :maxdist)
+                error(
+                    "For :ksplit correction with `target !== source`, you must provide `maxdist` (e.g., `correction = (method=:ksplit, maxdist=0.1, ...)`).",
+                )
+            end
+        end
+
+        δS = kernel_split_correction(
+            op,
+            source,
+            geo_args...,
+            Sop,
+            target;
+            layer_type = :single,
+            ksplit_kwargs...,
+        )
+        δD = kernel_split_correction(
+            op,
+            source,
+            geo_args...,
+            Dop,
+            target;
+            layer_type = :double,
+            ksplit_kwargs...,
+        )
+    elseif correction.method == :ksplit_adaptive
+        # Extract the required geometric args
+        geo_args = (
+            source_quad_connectivity = correction.connectivity,
+            source_el = correction.elements,
+            velocity_fn = correction.velocity_fn,
+            curvature_fn = correction.curvature_fn,
+            boundary_inv = correction.boundary_inv,
+        )
+
+        # Extract the adaptive ksplit-specific keywords
+        ksplit_adaptive_kw_names = (
+            :n_panel_corr,
+            :maxdist,
+            :target_location,
+            :Cε,
+            :Rε,
+            :parametric_length,
+            :affine_preimage,
+        )
+        ksplit_adaptive_kwargs =
+            filter(kv -> kv[1] in ksplit_adaptive_kw_names, pairs(correction))
+
+        if target === source
+            if !haskey(ksplit_adaptive_kwargs, :n_panel_corr)
+                error(
+                    "For :ksplit_adaptive correction with `target === source`, you must provide `n_panel_corr` (e.g., `correction = (method=:ksplit_adaptive, n_panel_corr=3, ...)`).",
+                )
+            end
+        else # target !== source
+            if !haskey(ksplit_adaptive_kwargs, :maxdist)
+                error(
+                    "For :ksplit_adaptive correction with `target !== source`, you must provide `maxdist` (e.g., `correction = (method=:ksplit_adaptive, maxdist=0.1, ...)`).",
+                )
+            end
+        end
+
+        δS = adaptive_kernel_split_correction(
+            op,
+            source,
+            geo_args...,
+            Sop,
+            target;
+            layer_type = :single,
+            ksplit_adaptive_kwargs...,
+        )
+        δD = adaptive_kernel_split_correction(
+            op,
+            source,
+            geo_args...,
+            Dop,
+            target;
+            layer_type = :double,
+            ksplit_adaptive_kwargs...,
+        )
     else
        error("Unknown correction method. Available options: $CORRECTION_METHODS")
     end
diff --git a/src/ksplit.jl b/src/ksplit.jl
new file mode 100644
index 00000000..1b38dc89
--- /dev/null
+++ b/src/ksplit.jl
@@ -0,0 +1,948 @@
+include("ksplit_utils.jl")
+
+"""
+    kernel_split_correction(
+        op,
+        source_quad::Quadrature,
+        source_quad_connectivity,
+        source_el,
+        velocity_fn,
+        curvature_fn,
+        boundary_inv,
+        Lop,
+        target=nothing;
+        kwargs...
+    )
+
+Given an operator `op` and a Gauss-Legendre Nyström discretization `Lop`
+defined on a source quadrature `source_quad`, compute a correction `δL` such
+that `Lop + δL` is a more accurate approximation of the underlying integral
+operator.
+
+This function implements a high-order quadrature correction based on variants
+of Helsing's kernel-split method. It explicitly splits the kernel `G` into a
+smooth part `G_S`, a log-singular part `G_L`, and a Cauchy-singular part `G_C`:
+
+G(x, y) = G_S(x, y) + G_L(x, y) log|x - y| + G_C(x, y) ((x - y) ⋅ n(y)) / |x - y|²
+
+and uses product integration to integrate each singular part analytically.
+
+See also [`helsing2008evaluation`](@ref) and [`helsing2015variants`](@ref).
+
+A good rule of thumb for choosing the mesh size `h` relative to the wavenumber
+`k` of a Helmholtz operator, when a 16-point Gauss-Legendre quadrature is used,
+is `h ≈ π / k`. This choice gives about 12 digits of accuracy for interior
+Dirichlet problems on a circle of radius 1. For more complicated geometries, a
+finer mesh may be needed.
+
+# Arguments
+
+## Required:
+
+- `op`: Must be an [`AbstractDifferentialOperator`](@ref) (e.g., `Inti.Laplace`).
+- `source_quad`: A [`Quadrature`](@ref) object for the source boundary `Y`.
+- `source_quad_connectivity`: Connectivity matrix for `source_quad` points.
+- `source_el`: A vector of panel parametrization functions for `source_quad`.
+- `velocity_fn`: Function `t -> SVector` for the boundary's parametric velocity `v(t)`.
+- `curvature_fn`: Function `t -> Float64` for the boundary's parametric signed curvature `κ(t)`.
+- `boundary_inv`: Function `x -> t` mapping physical points `x` to parameter `t`.
+- `Lop`: The approximated integral operator (e.g., from `Inti.assemble_operator`) to be corrected.
+
+## Optional:
+
+- `target=nothing`: The target points `X`. If `nothing` or `=== source_quad`,
+  computes the on-surface correction. Otherwise, computes the off-surface
+  correction for near-field interactions.
+- `n_panel_corr=3`: The total number of panels (self + neighbors) in the
+  fixed neighborhood of the source panel for the on-surface correction.
+  Must be an odd integer. The default provides a good balance between accuracy
+  and computational cost and works the majority of the time.
+- `maxdist=0.1`: Distance beyond which interactions are considered sufficiently far
+  so that no correction is needed. This is used to determine a threshold for
+  nearly-singular corrections when `X` and `Y` are different domains. When `X
+  === Y`, this is not needed.
+- `target_location=nothing`: Passed to `wLCinit` for off-surface correction
+  (e.g., `:inside`, `:outside`).
+- `layer_type=:single`: The layer potential type (`:single` or `:double`).
+- `parametric_length=1.0`: The total length of the parametric domain (e.g.,
+  `1.0` or `2π`).
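+
+# Example
+
+A minimal sketch of the on-surface case (`target === source`) through the
+high-level [`single_double_layer`](@ref) API. The quadrature `Q` and the
+geometry callbacks `conn`, `els`, `v`, `κ`, and `binv` are placeholder names
+for objects assumed to be built beforehand as described in the argument list
+above:
+
+```julia
+S, D = Inti.single_double_layer(;
+    op,
+    target = Q,
+    source = Q,
+    compression = (method = :none,),
+    correction = (
+        method       = :ksplit,
+        connectivity = conn, # quadrature connectivity matrix
+        elements     = els,  # vector of panel parametrizations
+        velocity_fn  = v,    # t -> SVector parametric velocity
+        curvature_fn = κ,    # t -> signed curvature
+        boundary_inv = binv, # x -> parameter t
+        n_panel_corr = 3,    # self panel plus one neighbor on each side
+    ),
+)
+```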
+""" + +function kernel_split_correction( + op, + source_quad::Quadrature, + source_quad_connectivity, + source_el, + velocity_fn, + curvature_fn, + boundary_inv, + Lop, + target = nothing; + n_panel_corr = 3, + maxdist = 0.1, + target_location = nothing, + layer_type = :single, + parametric_length = 1.0, + ) + if isnothing(target) + target = source_quad + end + speed_fn(θ) = norm(velocity_fn(θ)) + T = eltype(Lop) # element type of the operator + + # set coefficient functions for the kernel split + kernels = _get_ksplit_kernels(op, layer_type, T) + G_S = kernels.G_S + G_L = kernels.G_L + G_C = kernels.G_C + + Is, Js, Ls = Int[], Int[], T[] + + n_quad_pt = size(source_quad_connectivity)[1] + n_el = size(source_quad_connectivity)[2] + t_Leg_ref, w_Leg_ref = gausslegendre(n_quad_pt) + + if target == source_quad + # define the weight matrix for the product integrations + n = size(Lop)[1] + + # Build the connectivity map to handle randomly ordered panels + panel_to_nodes_map, node_to_panel_map = build_neighbor_information(source_el, n_el) + + # define the correction distance + n_panel_corr_dist = round(Int, (n_panel_corr - 1) / 2) + + # Pre-compute parametric intervals and sizes for all panels + panel_intervals = Vector{Tuple{Float64, Float64}}(undef, n_el) + for i in 1:n_el + node_a = source_el[i](0) + node_b = source_el[i](1) + t_a = boundary_inv(node_a) + t_b = boundary_inv(node_b) + if (t_b - t_a) < -parametric_length / 2 # Handle branch cut + t_b += parametric_length + end + panel_intervals[i] = (t_a, t_b) + end + + # loop over each element to compute the correction δL + for j_el in 1:n_el + j_el_vec = OffsetArray( + Vector{Int}(undef, 2 * n_panel_corr_dist + 1), + -n_panel_corr_dist:n_panel_corr_dist, + ) + j_el_vec[0] = j_el + + # Find neighbors in the "forward" direction + current_panel_idx = j_el + (start_node_id, end_node_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = end_node_id + for k in 1:n_panel_corr_dist + connected_panels = node_to_panel_map[current_node_id] + next_panel_idx = if (connected_panels[1] == current_panel_idx) + connected_panels[2] + else + connected_panels[1] + end + j_el_vec[k] = next_panel_idx + + # Update for the next iteration + current_panel_idx = next_panel_idx + (next_start_id, next_end_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = + (next_start_id == current_node_id) ? next_end_id : next_start_id + end + + # Find neighbors in the "backward" direction + current_panel_idx = j_el + (start_node_id, end_node_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = start_node_id + for k in -1:-1:-n_panel_corr_dist + connected_panels = node_to_panel_map[current_node_id] + next_panel_idx = if (connected_panels[1] == current_panel_idx) + connected_panels[2] + else + connected_panels[1] + end + j_el_vec[k] = next_panel_idx + + # Update for the next iteration + current_panel_idx = next_panel_idx + (next_start_id, next_end_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = + (next_start_id == current_node_id) ? next_end_id : next_start_id + end + + idx_global_corr_vec = (j_el_vec .- 1) * n_quad_pt + + # Get source panel j_el info + (t_a_j, t_b_j) = panel_intervals[j_el] + H_j = (t_b_j - t_a_j) / 2 # Source panel half-length + + # The calculation for W_L_c_corr (the k=0 case) is based on a standard interval + 𝔚_L_diag = WfrakLinit(0, 1, t_Leg_ref, n_quad_pt) + W_L_c_corr = zeros(n_quad_pt, n_quad_pt) + for i in 1:n_quad_pt, j in 1:n_quad_pt + W_L_c_corr[i, j] = + 𝔚_L_diag[i, j] / w_Leg_ref[j] - + (i != j ? 
log(abs(t_Leg_ref[i] - t_Leg_ref[j])) : 0) + end + + # Add extra singular correction for diagonal elements + for i in 1:n_quad_pt + quad_i = source_quad[idx_global_corr_vec[0] + i] + t_i = boundary_inv(Inti.coords(quad_i)) + W_L_c_corr[i, i] += log(H_j * speed_fn(t_i)) + push!(Is, idx_global_corr_vec[0] + i) + push!(Js, idx_global_corr_vec[0] + i) + push!( + Ls, + G_C(quad_i, quad_i) * Inti.weight(quad_i) * (-curvature_fn(t_i) / 2), + ) + end + + # Loop over neighbors and compute corrections dynamically + for k in -n_panel_corr_dist:n_panel_corr_dist + j_k = j_el_vec[k] # This is the target panel index + + local W_L_matrix + if k == 0 + W_L_matrix = W_L_c_corr + else + # Get target panel j_k info + (t_a_k, t_b_k) = panel_intervals[j_k] + C_k = (t_a_k + t_b_k) / 2 + H_k = (t_b_k - t_a_k) / 2 + + # Center of source panel + C_j = (t_a_j + t_b_j) / 2 + + # Correct distance calculation for periodic domain --- + dist = C_k - C_j + if dist > parametric_length / 2 + dist -= parametric_length + elseif dist < -parametric_length / 2 + dist += parametric_length + end + trans = dist / H_j + scale = H_k / H_j + + 𝔚_L_offdiag = WfrakLinit(trans, scale, t_Leg_ref, n_quad_pt) + W_L_matrix = zeros(n_quad_pt, n_quad_pt) + + t_target_in_source_frame = trans .+ scale .* t_Leg_ref + for i in 1:n_quad_pt, j in 1:n_quad_pt + W_L_matrix[i, j] = + 𝔚_L_offdiag[i, j] / w_Leg_ref[j] - + log(abs(t_target_in_source_frame[i] - t_Leg_ref[j])) + end + end + + # Apply the correction using the dynamically computed W_L_matrix + for i in 1:n_quad_pt + quad_i = source_quad[idx_global_corr_vec[k] + i] + for j in 1:n_quad_pt + quad_j = source_quad[idx_global_corr_vec[0] + j] + quad_weight_j = Inti.weight(quad_j) + + correction_term = + G_L(quad_i, quad_j) * quad_weight_j * W_L_matrix[i, j] + if k == 0 && i == j + correction_term += G_S(quad_i, quad_i) * Inti.weight(quad_i) + end + + push!(Is, idx_global_corr_vec[k] + i) + push!(Js, idx_global_corr_vec[0] + j) + push!(Ls, correction_term) + end + end + end + end + δL = sparse(Is, Js, Ls, n, n) + else # target != source + n_target = length(target) + n_source = length(source_quad) + + dict_near = near_points_vec(target, source_quad; maxdist = maxdist) + near_idx = first(dict_near)[2] + + for j_el in 1:n_el + idx_global_corr = (j_el - 1) * n_quad_pt + for i in near_idx[j_el] + target_node_i = Inti.coords(target[i]) + target_node_i_complex = target_node_i[1] + im * target_node_i[2] + + node_a = source_el[j_el](0) + node_b = source_el[j_el](1) + node_a_complex = node_a[1] + im * node_a[2] + node_b_complex = node_b[1] + im * node_b[2] + t_a = boundary_inv(node_a) + t_b = boundary_inv(node_b) + + quad_j_el = source_quad[(idx_global_corr + 1):(idx_global_corr + n_quad_pt)] + + quad_node_j_el = Inti.coords.(quad_j_el) + quad_node_j_el_complex = + [quad_node_j[1] + im * quad_node_j[2] for quad_node_j in quad_node_j_el] + quad_normal_j_el = Inti.normal.(quad_j_el) + quad_normal_j_el_complex = [ + quad_normal_j[1] + im * quad_normal_j[2] for + quad_normal_j in quad_normal_j_el + ] + quad_weight_j_el = Inti.weight.(quad_j_el) + + # tj = real(boundary_inv.(quad_node_j_el)) # this might be unnecessary + tj = boundary_inv.(quad_node_j_el) + + # check for branch cut for the boundary_inv function and correct it if necessary + if (t_b - t_a) < -parametric_length / 2 + t_b += parametric_length + end + + quad_velocity_weight_j_el = (t_b - t_a) / 2 .* velocity_fn.(tj) .* w_Leg_ref + + wcorrL, wcmpC = wLCinit( + node_a_complex, + node_b_complex, + target_node_i_complex, + quad_node_j_el_complex, + 
quad_normal_j_el_complex,
+                    quad_velocity_weight_j_el,
+                    n_quad_pt;
+                    target_location = target_location,
+                )
+
+                # loop over each quadrature point to compute the correction δL
+                for j in 1:n_quad_pt
+                    push!(Is, i)
+                    push!(Js, idx_global_corr + j)
+                    push!(
+                        Ls,
+                        G_L(target[i], quad_j_el[j]) * quad_weight_j_el[j] * wcorrL[j] +
+                        G_C(target[i], quad_j_el[j]) * wcmpC[j],
+                    )
+                end
+            end
+        end
+        δL = sparse(Is, Js, Ls, n_target, n_source)
+    end
+
+    return δL
+end
+
+"""
+    adaptive_kernel_split_correction(
+        op,
+        source_quad::Quadrature,
+        source_quad_connectivity,
+        source_el,
+        velocity_fn,
+        curvature_fn,
+        boundary_inv,
+        Lop,
+        target=nothing;
+        kwargs...
+    )
+
+Given an operator `op` and a Gauss-Legendre Nyström discretization `Lop`
+defined on a source quadrature `source_quad`, compute a correction `δL` such
+that `Lop + δL` is a more accurate approximation of the underlying integral
+operator.
+
+This function implements an adaptive version of Helsing's kernel-split method
+due to Fryklund et al., designed specifically for a class of modified PDEs,
+such as the modified Helmholtz (Yukawa) equation. It uses per-target adaptive
+sampling of the source geometry. The refinement is carried out through
+recursive bisection, maintaining accuracy for a wide range of the parameter `α`.
+
+See also [`fryklund2022adaptive`](@ref).
+
+# Arguments
+
+## Required:
+
+- `op`: Must be an [`AbstractDifferentialOperator`](@ref) (e.g., `Inti.Laplace`).
+- `source_quad`: A [`Quadrature`](@ref) object for the source boundary `Y`.
+- `source_quad_connectivity`: Connectivity matrix for `source_quad` points.
+- `source_el`: A vector of panel parametrization functions for `source_quad`.
+- `velocity_fn`: Function `t -> SVector` for the boundary's parametric velocity `v(t)`.
+- `curvature_fn`: Function `t -> Float64` for the boundary's parametric signed curvature `κ(t)`.
+- `boundary_inv`: Function `x -> t` mapping physical points `x` to parameter `t`.
+- `Lop`: The approximated integral operator (e.g., from `Inti.assemble_operator`) to be corrected.
+
+## Optional:
+
+- `target=nothing`: The target points `X`. If `nothing` or `=== source_quad`,
+  computes the on-surface correction. Otherwise, computes the off-surface
+  correction for near-field interactions.
+- `n_panel_corr=3`: The total number of panels (self + neighbors) in the
+  fixed neighborhood of the source panel for the on-surface correction.
+  Must be an odd integer. The default provides a good balance between accuracy
+  and computational cost and works the majority of the time.
+- `maxdist=0.1`: Distance beyond which interactions are considered sufficiently far
+  so that no correction is needed. This is used to determine a threshold for
+  nearly-singular corrections when `X` and `Y` are different domains. When `X
+  === Y`, this is not needed.
+- `target_location=nothing`: Passed to `wLCinit` for off-surface correction
+  (e.g., `:inside`, `:outside`).
+- `layer_type=:single`: The layer potential type (`:single` or `:double`).
+- `Cε=3.5`: The accuracy constant for the kernel-split method, used in the subinterval
+  length criterion: `Δt ≤ 2*Cε / (α*h)`. It limits the product `α*h` to ensure that
+  the smooth, but exponentially-growing, coefficient functions (e.g., `G_L` from the
+  kernel split) can be accurately approximated by polynomials.
The default value works
+  well in practice. See [`fryklund2022adaptive`](@ref) for details.
+- `Rε=3.7`: The cutoff radius for the "near evaluation criterion", which determines
+  whether to use kernel-split quadrature or standard Gauss-Legendre quadrature. The
+  default value works well in practice. See [`fryklund2022adaptive`](@ref) for details.
+- `parametric_length=1.0`: The total length of the parametric domain (e.g.,
+  `1.0` or `2π`).
+- `affine_preimage=true`: How to compute the preimage of each target point on
+  the source panel: `true` uses a fast, direct affine map; `false` refines the
+  affine guess with Newton's method on a polynomial panel interpolant. See
+  [`adaptive_refinement`](@ref).
+"""
+
+function adaptive_kernel_split_correction(
+    op,
+    source_quad,
+    source_quad_connectivity,
+    source_el,
+    velocity_fn,
+    curvature_fn,
+    boundary_inv,
+    Lop,
+    target = nothing;
+    n_panel_corr = 3,
+    maxdist = 0.1,
+    target_location = nothing,
+    layer_type = :single,
+    Cε = 3.5,
+    Rε = 3.7,
+    parametric_length = 1.0,
+    affine_preimage::Bool = true,
+    )
+    if isnothing(target)
+        target = source_quad
+    end
+    speed_fn(θ) = norm(velocity_fn(θ))
+    T = eltype(Lop) # element type of the operator
+
+    if layer_type == :single
+        G = Inti.SingleLayerKernel(op)
+    elseif layer_type == :double
+        G = Inti.DoubleLayerKernel(op)
+    else
+        throw("Unsupported layer type: $layer_type")
+    end
+
+    # set coefficient functions for the kernel split
+    kernels = _get_ksplit_kernels(op, layer_type, T)
+    G_S = kernels.G_S
+    G_L = kernels.G_L
+    G_C = kernels.G_C
+
+    Is, Js, Ls = Int[], Int[], T[]
+
+    # define parameters for the adaptive kernel split
+    n_quad_pt = size(source_quad_connectivity)[1]
+    n_el = size(source_quad_connectivity)[2]
+    t_Leg_ref, w_Leg_ref = gausslegendre(n_quad_pt)
+    L_Leg = L_Legendre_matrix(n_quad_pt)
+    w_bary = LegendreBaryWeights(n_quad_pt)
+
+    # FIXME: placeholder for Laplace
+    if op isa Inti.Laplace{2}
+        α = 5π
+        @warn(
+            "Using placeholder value α = 5π for Laplace operator in adaptive kernel split",
+        )
+    elseif op isa Inti.Helmholtz{2, Float64}
+        α = op.k
+        @warn("Adaptive kernel split does not provide computational advantages for Helmholtz
+        operators.
Consider using the standard kernel split with fixed number of points + per wavelength instead.") + elseif op isa Inti.Yukawa{2, Float64} + α = op.λ + else + throw("Operator type not supported for adaptive kernel split") + end + + if target == source_quad + n_source = n_target = size(Lop)[1] # total number of dofs + + # define the correction distance + n_panel_corr_dist = round(Int, (n_panel_corr - 1) / 2) + + # Build connectivity maps and pre-compute panel intervals + panel_to_nodes_map, node_to_panel_map = build_neighbor_information(source_el, n_el) + + panel_intervals = Vector{Tuple{Float64, Float64}}(undef, n_el) + for i in 1:n_el + t_a = boundary_inv(source_el[i](0)) + t_b = boundary_inv(source_el[i](1)) + if (t_b - t_a) < -parametric_length / 2 + t_b += parametric_length + end + panel_intervals[i] = (t_a, t_b) + end + + # loop over each element to compute the correction δL + for j_el in 1:n_el + + # Find neighbors using connectivity map + j_el_vec = OffsetArray( + Vector{Int}(undef, 2 * n_panel_corr_dist + 1), + -n_panel_corr_dist:n_panel_corr_dist, + ) + j_el_vec[0] = j_el + # Find neighbors in the "forward" direction + current_panel_idx = j_el + (start_node_id, end_node_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = end_node_id + for k_fwd in 1:n_panel_corr_dist + connected_panels = node_to_panel_map[current_node_id] + next_panel_idx = if (connected_panels[1] == current_panel_idx) + connected_panels[2] + else + connected_panels[1] + end + j_el_vec[k_fwd] = next_panel_idx + current_panel_idx = next_panel_idx + (next_start_id, next_end_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = + (next_start_id == current_node_id) ? next_end_id : next_start_id + end + # Find neighbors in the "backward" direction + current_panel_idx = j_el + (start_node_id, end_node_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = start_node_id + for k_bwd in -1:-1:-n_panel_corr_dist + connected_panels = node_to_panel_map[current_node_id] + next_panel_idx = if (connected_panels[1] == current_panel_idx) + connected_panels[2] + else + connected_panels[1] + end + j_el_vec[k_bwd] = next_panel_idx + current_panel_idx = next_panel_idx + (next_start_id, next_end_id) = panel_to_nodes_map[current_panel_idx] + current_node_id = + (next_start_id == current_node_id) ? 
next_end_id : next_start_id + end + + idx_global_corr_vec = (j_el_vec .- 1) * n_quad_pt + + # Get source panel info + (t_a_j, t_b_j) = panel_intervals[j_el] + C_j = (t_a_j + t_b_j) / 2 + H_j = (t_b_j - t_a_j) / 2 + + # This must be defined here, as it's specific to the source panel `j_el` + panel_parametrization = t -> SVector(source_el[j_el](scale_fn(0, 1, t))) + + # Compute subdivisions for all neighbors relative to the current source panel `j_el` + panel_subdivision_vec = OffsetArray( + Vector{Vector{Vector{Tuple}}}(undef, n_panel_corr), + -n_panel_corr_dist:n_panel_corr_dist, + ) + + ksplit_bool_vec = OffsetArray( + Vector{Vector{Vector{Bool}}}(undef, n_panel_corr), + -n_panel_corr_dist:n_panel_corr_dist, + ) + + for k in -n_panel_corr_dist:n_panel_corr_dist + panel_subdivision_vec[k], ksplit_bool_vec[k] = adaptive_refinement( + source_quad[(idx_global_corr_vec[0] + 1):(idx_global_corr_vec[0] + n_quad_pt)], + source_el[j_el], + L_Leg, + α, + Cε, + Rε, + target[(idx_global_corr_vec[k] + 1):(idx_global_corr_vec[k] + n_quad_pt)], + affine_preimage = affine_preimage, + ) + end + + if isodd(n_quad_pt) + throw("n_quad_pt = odd case not implemented") + end + + # compute the local correction matrix δL_local_vec + for k in -n_panel_corr_dist:n_panel_corr_dist + j_k = j_el_vec[k] # Target panel index + + # Get target panel info and compute trans and scale + (t_a_k, t_b_k) = panel_intervals[j_k] + C_k = (t_a_k + t_b_k) / 2 + H_k = (t_b_k - t_a_k) / 2 + + dist = C_k - C_j + if dist > parametric_length / 2 + dist -= parametric_length + elseif dist < -parametric_length / 2 + dist += parametric_length + end + trans = dist / H_j + scale = H_k / H_j + + for i in 1:n_quad_pt + + quad_i = target[idx_global_corr_vec[k] + i] + t_i = boundary_inv(Inti.coords(quad_i)) + M_sub = length(panel_subdivision_vec[k][i]) + + # No subdivision but kernel split needed + if M_sub == 1 && ksplit_bool_vec[k][i][1] == true + # Determine trans and scale for the current panel pair (j_el, j_k) + local trans, scale + if k == 0 + trans = 0.0 + scale = 1.0 + else + (t_a_k, t_b_k) = panel_intervals[j_k] + C_k = (t_a_k + t_b_k) / 2 + H_k = (t_b_k - t_a_k) / 2 + dist = C_k - C_j + if dist > parametric_length / 2 + dist -= parametric_length + end + if dist < -parametric_length / 2 + dist += parametric_length + end + trans = dist / H_j + scale = H_k / H_j + end + + # Compute W_L_matrix with a single, conditional log term + 𝔚_L_matrix = WfrakLinit(trans, scale, t_Leg_ref, n_quad_pt) + W_L_matrix = zeros(n_quad_pt, n_quad_pt) + t_target_in_source_frame = trans .+ scale .* t_Leg_ref + + for i_w in 1:n_quad_pt, j_w in 1:n_quad_pt + log_arg = abs(t_target_in_source_frame[i_w] - t_Leg_ref[j_w]) + # The log is only singular when k=0 and i_w=j_w. + log_term = (k == 0 && i_w == j_w) ? 
0.0 : log(log_arg) + W_L_matrix[i_w, j_w] = + 𝔚_L_matrix[i_w, j_w] / w_Leg_ref[j_w] - log_term + end + + if k == 0 + W_L_matrix[i, i] += log(H_j * speed_fn(t_i)) + push!(Is, idx_global_corr_vec[0] + i) + push!(Js, idx_global_corr_vec[0] + i) + push!( + Ls, + G_C(quad_i, quad_i) * + Inti.weight(quad_i) * + (-curvature_fn(t_i) / 2), + ) + end + + for j in 1:n_quad_pt + quad_j = source_quad[idx_global_corr_vec[0] + j] + correction_term = + G_L(quad_i, quad_j) * Inti.weight(quad_j) * W_L_matrix[i, j] + if k == 0 && i == j + correction_term += G_S(quad_i, quad_i) * Inti.weight(quad_i) + end + push!(Is, idx_global_corr_vec[k] + i) + push!(Js, idx_global_corr_vec[0] + j) + push!(Ls, correction_term) + end + + else # subdivision needed + # subtract regular coarse quadrature + for j in 1:n_quad_pt + quad_j = source_quad[idx_global_corr_vec[0] + j] + push!(Is, idx_global_corr_vec[k] + i) + push!(Js, idx_global_corr_vec[0] + j) + push!(Ls, -G(quad_i, quad_j) * Inti.weight(quad_j)) + end + + for k_sub in 1:M_sub + subpanel_node1, subpanel_node2 = + panel_subdivision_vec[k][i][k_sub] + + t_Leg_sub = scale_fn(subpanel_node1, subpanel_node2, t_Leg_ref) + + panel_curve = Inti.parametric_curve( + panel_parametrization_fn(source_el[j_el]), + subpanel_node1, + subpanel_node2, + ) + + panel_msh = Inti.meshgen( + panel_curve; + meshsize = (subpanel_node2 - subpanel_node1), + ) + panel_quad = + Inti.Quadrature(panel_msh, Inti.GaussLegendre(n_quad_pt)) + + # Add regular refined quadrature contribution + for ĵ in 1:n_quad_pt + quad_ĵ = panel_quad[ĵ] + quad_weight_ĵ = Inti.weight(quad_ĵ) + bary_coef = Barycentric_coef( + t_Leg_sub[ĵ]; + n = n_quad_pt, + t_interp = t_Leg_ref, + w = w_bary, + ) + for j in 1:n_quad_pt + push!(Is, idx_global_corr_vec[k] + i) + push!(Js, idx_global_corr_vec[0] + j) + push!( + Ls, + G(quad_i, quad_ĵ) * quad_weight_ĵ * bary_coef[j], + ) + end + end + + # Add kernel split correction + if ksplit_bool_vec[k][i][k_sub] == 1 + # The generalized target_node calculation is correct and remains. 
+ dist_ti = t_i - C_j + if dist_ti > parametric_length / 2 + dist_ti -= parametric_length + end + if dist_ti < -parametric_length / 2 + dist_ti += parametric_length + end + τ_i = dist_ti / H_j + + target_node = + (2 * τ_i - (subpanel_node1 + subpanel_node2)) / + (subpanel_node2 - subpanel_node1) + + 𝔚_L_vec = WfrakLinit(target_node, t_Leg_ref, n_quad_pt) + W_L_vec = zeros(n_quad_pt) + for j_w in 1:n_quad_pt + W_L_vec[j_w] = + 𝔚_L_vec[j_w] / w_Leg_ref[j_w] - + log(abs(target_node - t_Leg_ref[j_w])) + end + + # Use the robust panel_quad object for the correction sum + for ĵ in 1:n_quad_pt + quad_ĵ = panel_quad[ĵ] + quad_weight_ĵ = Inti.weight(quad_ĵ) + bary_coef = Barycentric_coef( + t_Leg_sub[ĵ]; + n = n_quad_pt, + t_interp = t_Leg_ref, + w = w_bary, + ) + correction_term = + G_L(quad_i, quad_ĵ) * quad_weight_ĵ * W_L_vec[ĵ] + for j in 1:n_quad_pt + push!(Is, idx_global_corr_vec[k] + i) + push!(Js, idx_global_corr_vec[0] + j) + push!(Ls, correction_term * bary_coef[j]) + end + end + end + end + end + end + end + end + else # target != source + n_target = length(target) + n_source = length(source_quad) + + # Pre-compute parametric intervals for all source panels + panel_intervals = Vector{Tuple{Float64, Float64}}(undef, n_el) + for i in 1:n_el + t_a_i = boundary_inv(source_el[i](0)) + t_b_i = boundary_inv(source_el[i](1)) + if (t_b_i - t_a_i) < -parametric_length / 2 + t_b_i += parametric_length + end + panel_intervals[i] = (t_a_i, t_b_i) + end + + dict_near = near_points_vec(target, source_quad; maxdist = maxdist) + near_idx = first(dict_near)[2] + + for j_el in 1:n_el + idx_global_corr = (j_el - 1) * n_quad_pt + panel_subdivision_vec, ksplit_bool_vec = adaptive_refinement( + source_quad[(idx_global_corr + 1):(idx_global_corr + n_quad_pt)], + source_el[j_el], + L_Leg, + α, + Cε, + Rε, + target[near_idx[j_el]], + affine_preimage = affine_preimage, + ) + + # Get pre-computed, corrected panel information + (t_a, t_b) = panel_intervals[j_el] + node_a = source_el[j_el](0) + node_a_complex = node_a[1] + im * node_a[2] + node_b = source_el[j_el](1) + node_b_complex = node_b[1] + im * node_b[2] + panel_parametrization = panel_parametrization_fn(source_el[j_el]) + + n_near_pt = length(near_idx[j_el]) + for i in 1:n_near_pt # Loop over each nearby target point + near_idx_i = near_idx[j_el][i] + target_i = target[near_idx_i] + target_node_i_complex = + Inti.coords(target_i)[1] + im * Inti.coords(target_i)[2] + + M_sub = length(panel_subdivision_vec[i]) + + if M_sub == 1 && ksplit_bool_vec[i][1] == true # No subdivision, but kernel split + quad_j_el = source_quad[(idx_global_corr + 1):(idx_global_corr + n_quad_pt)] + quad_node_j_el = Inti.coords.(quad_j_el) + quad_node_j_el_complex = [q[1] + im * q[2] for q in quad_node_j_el] + quad_normal_j_el_complex = + [Inti.normal(q)[1] + im * Inti.normal(q)[2] for q in quad_j_el] + + tj = boundary_inv.(quad_node_j_el) + quad_velocity_weight_j_el = + (t_b - t_a) / 2 .* velocity_fn.(tj) .* w_Leg_ref + + wcorrL, wcmpC = wLCinit( + node_a_complex, + node_b_complex, + target_node_i_complex, + quad_node_j_el_complex, + quad_normal_j_el_complex, + quad_velocity_weight_j_el, + n_quad_pt; + target_location = target_location, + ) + + for j in 1:n_quad_pt + push!(Is, near_idx_i) + push!(Js, idx_global_corr + j) + push!( + Ls, + G_L(target_i, quad_j_el[j]) * + Inti.weight(quad_j_el[j]) * + wcorrL[j] + G_C(target_i, quad_j_el[j]) * wcmpC[j], + ) + end + + elseif M_sub > 1 # Subdivision needed + # Subtract original coarse quadrature + for j in 1:n_quad_pt + quad_j = 
source_quad[idx_global_corr + j]
+                        push!(Is, near_idx_i)
+                        push!(Js, idx_global_corr + j)
+                        push!(Ls, -G(target_i, quad_j) * Inti.weight(quad_j))
+                    end
+
+                    for k_sub in 1:M_sub # Add contributions from each subpanel
+                        subpanel_node1, subpanel_node2 = panel_subdivision_vec[i][k_sub]
+
+                        # Create the subpanel quadrature object using meshgen
+                        t_Leg_sub = scale_fn(subpanel_node1, subpanel_node2, t_Leg_ref)
+                        panel_curve = Inti.parametric_curve(
+                            panel_parametrization,
+                            subpanel_node1,
+                            subpanel_node2,
+                        )
+                        panel_msh = Inti.meshgen(
+                            panel_curve;
+                            meshsize = (subpanel_node2 - subpanel_node1),
+                        )
+                        panel_quad =
+                            Inti.Quadrature(panel_msh, Inti.GaussLegendre(n_quad_pt))
+
+                        # Add regular refined quadrature contribution using the robust panel_quad
+                        for ĵ in 1:n_quad_pt
+                            quad_ĵ = panel_quad[ĵ]
+                            quad_weight_ĵ = Inti.weight(quad_ĵ)
+                            bary_coef = Barycentric_coef(
+                                t_Leg_sub[ĵ];
+                                n = n_quad_pt,
+                                t_interp = t_Leg_ref,
+                                w = w_bary,
+                            )
+
+                            for j in 1:n_quad_pt
+                                push!(Is, near_idx_i)
+                                push!(Js, idx_global_corr + j)
+                                push!(
+                                    Ls,
+                                    G(target_i, quad_ĵ) * quad_weight_ĵ * bary_coef[j],
+                                )
+                            end
+                        end
+
+                        # Add kernel split correction if needed
+                        if ksplit_bool_vec[i][k_sub] == true
+                            panel_node_a_complex =
+                                panel_parametrization(subpanel_node1)[1] +
+                                im * panel_parametrization(subpanel_node1)[2]
+                            panel_node_b_complex =
+                                panel_parametrization(subpanel_node2)[1] +
+                                im * panel_parametrization(subpanel_node2)[2]
+
+                            panel_node_complex = [
+                                Inti.coords(q)[1] + im * Inti.coords(q)[2] for
+                                q in panel_quad
+                            ]
+                            panel_normal_complex = [
+                                Inti.normal(q)[1] + im * Inti.normal(q)[2] for
+                                q in panel_quad
+                            ]
+                            tj_sub = boundary_inv.(Inti.coords.(panel_quad))
+                            quad_velocity_weight =
+                                (t_b - t_a) / 2 * (subpanel_node2 - subpanel_node1) / 2 .*
+                                velocity_fn.(tj_sub) .* w_Leg_ref
+
+                            wcorrL, wcmpC = wLCinit(
+                                panel_node_a_complex,
+                                panel_node_b_complex,
+                                target_node_i_complex,
+                                panel_node_complex,
+                                panel_normal_complex,
+                                quad_velocity_weight,
+                                n_quad_pt;
+                                target_location = target_location,
+                            )
+
+                            for ĵ in 1:n_quad_pt
+                                quad_ĵ = panel_quad[ĵ]
+                                quad_weight_ĵ = Inti.weight(quad_ĵ)
+                                bary_coef = Barycentric_coef(
+                                    t_Leg_sub[ĵ];
+                                    n = n_quad_pt,
+                                    t_interp = t_Leg_ref,
+                                    w = w_bary,
+                                )
+                                correction_term =
+                                    G_L(target_i, quad_ĵ) * quad_weight_ĵ * wcorrL[ĵ] +
+                                    G_C(target_i, quad_ĵ) * wcmpC[ĵ]
+                                for j in 1:n_quad_pt
+                                    push!(Is, near_idx_i)
+                                    push!(Js, idx_global_corr + j)
+                                    push!(Ls, correction_term * bary_coef[j])
+                                end
+                            end
+                        end
+                    end
+                end
+            end
+        end
+    end
+
+    δL = sparse(Is, Js, Ls, n_target, n_source)
+
+    # HACK: Clear Gmsh entities created by Inti.meshgen to prevent a memory leak.
+    # This is a temporary workaround and should be removed once the underlying issue is fixed.
+    Inti.clear_entities!()
+
+    return δL
+end
diff --git a/src/ksplit_utils.jl b/src/ksplit_utils.jl
new file mode 100644
index 00000000..dc3ab3db
--- /dev/null
+++ b/src/ksplit_utils.jl
@@ -0,0 +1,872 @@
+using SpecialFunctions
+using SparseArrays
+using StaticArrays
+using OffsetArrays
+using NearestNeighbors
+using FastGaussQuadrature
+using LinearAlgebra
+using LinearMaps
+using SpecialMatrices
+using ClassicalOrthogonalPolynomials
+
+"""
+    WfrakLinit(trans, scale, tfrak, npt) -> Matrix{Float64}
+
+Computes the `npt × npt` product integration weight matrix `𝔚_L` for a
+logarithmic singularity, as defined in Appendix A of Helsing & Holst (2015).
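+
+Concretely (a restatement of that construction, not a verbatim formula from the
+paper): row `m` of the returned matrix `𝔚_L` contains weights satisfying
+
+    ∑ⱼ 𝔚_L[m, j] f(tⱼ) = ∫₋₁¹ f(t) log|τₘ - t| dt
+
+exactly whenever `f` is a polynomial of degree `< npt`, where
+`τₘ = trans + scale * tₘ` is the `m`-th target node expressed in the source
+panel's canonical coordinates.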
+
+This function is used for panel-to-panel (on-surface) corrections, where
+`trans` and `scale` define the target panel's position relative to the source
+panel in canonical coordinates. It computes the weights for all `npt` target
+nodes against all `npt` source nodes simultaneously.
+
+This is called for self-panel corrections (with `trans=0`, `scale=1`) and
+neighbor-panel corrections.
+
+See also [`helsing2015variants`](@ref).
+
+# Arguments
+- `trans`: Translation of the target panel relative to the source panel,
+  defined as `(C_k - C_j) / H_j`.
+- `scale`: Scaling of the target panel relative to the source panel,
+  defined as `H_k / H_j`.
+- `tfrak`: Vector of `npt` canonical Gauss-Legendre nodes on `[-1, 1]`
+  (source nodes).
+- `npt`: The number of quadrature points per panel.
+
+# Returns
+- `Matrix{Float64}`: The `npt × npt` weight matrix `𝔚_L`.
+"""
+
+# Weight matrix for product integration of log singularity when target = source
+function WfrakLinit(trans, scale, tfrak, npt)
+    A = Vandermonde(tfrak)
+    tt = trans .+ scale .* tfrak
+    Q = zeros(npt, npt)
+    p = zeros(npt + 1)
+    c = (1 .- (-1) .^ (1:npt)) ./ (1:npt)
+
+    for m in 1:npt
+        p[1] = log(abs((1 - tt[m]) / (1 + tt[m])))
+        p1 = log(abs(1 - tt[m]^2))
+
+        for k in 1:npt
+            p[k + 1] = tt[m] * p[k] + c[k]
+        end
+
+        Q[m, 1:2:(npt - 1)] = p1 .- p[2:2:npt]
+        Q[m, 2:2:npt] = p[1] .- p[3:2:(npt + 1)]
+        Q[m, :] ./= (1:npt)
+    end
+
+    return Q / A
+end
+
+"""
+    WfrakLinit(tt, tfrak, npt) -> Vector{Float64}
+
+Computes the `npt`-element product integration weight vector `𝔴_L` for a
+logarithmic singularity for a single target point `tt`.
+
+This is a specialized, multiple-dispatch version used in the adaptive kernel
+split. It calculates the weights for one target point relative to all `npt`
+source nodes of a (sub)panel.
+
+# Arguments
+- `tt`: The canonical coordinate `t` of the single target point, mapped into
+  the `[-1, 1]` domain of the source (sub)panel.
+- `tfrak`: Vector of `npt` canonical Gauss-Legendre nodes on `[-1, 1]`
+  (representing the source nodes).
+- `npt`: The number of quadrature points per panel.
+
+# Returns
+- `Vector{Float64}`: The `npt`-element weight vector `𝔴_L` for the
+  target point `tt`.
+"""
+
+# Weight vector for product integration of log singularity when target = source,
+# taking the target node as the argument
+function WfrakLinit(tt, tfrak, npt)
+    A = Vandermonde(tfrak)
+    q = zeros(npt)
+    p = zeros(npt + 1)
+    c = (1 .- (-1) .^ (1:npt)) ./ (1:npt)
+
+    p[1] = log(abs((1 - tt) / (1 + tt)))
+    p1 = log(abs(1 - tt^2))
+
+    for k in 1:npt
+        p[k + 1] = tt * p[k] + c[k]
+    end
+
+    q[1:2:(npt - 1)] = p1 .- p[2:2:npt]
+    q[2:2:npt] = p[1] .- p[3:2:(npt + 1)]
+    q ./= (1:npt)
+
+    return A' \ q
+end
+
+"""
+    wLCinit(ra, rb, r, rj, nuj, rpwj, npt; target_location) -> Tuple
+
+Computes the log-correction weights (`wcorrL`) and Cauchy-compensation
+weights (`wcmpC`) for an off-surface target point `r`.
+
+This function is a Julia implementation of the MATLAB code from Appendix B
+of Helsing & Holst (2015).
+
+Crucially, this version is generalized to handle the complex logarithm's
+branch cut for both interior and exterior problems via the
+`target_location` argument. The sign convention for the
+`target_location = :inside` correction is reversed from the
+exterior problem presented in the paper to correctly handle interior problems.
+
+See also [`helsing2015variants`](@ref).
+
+# Arguments
+- `ra`, `rb`: Complex coordinates of the source panel's start and end points.
+- `r`: Complex coordinate of the single off-surface target point. +- `rj`: Vector of `npt` complex coordinates of the source quadrature nodes. +- `nuj`: Vector of `npt` complex unit normals at the source nodes. +- `rpwj`: Vector of `npt` complex weighted velocity values (`r'(t_j) * w_j`). +- `npt`: The number of quadrature points. +- `target_location`: A `Symbol` (e.g., `:inside` or `:outside`) specifying + the problem domain relative to the boundary. This is required to + apply the correct branch cut rule for the complex logarithm. + +# Returns +- `(wcorrL, wcmpC)`: A tuple containing: + - `wcorrL`: An `npt`-element `Vector{Float64}` of log-correction weights. + - `wcmpC`: An `npt`-element `Vector{Float64}` of Cauchy-compensation weights. +""" + +# Weight and compensation matrix for product integration of log and Cauchy singularity for generic target +function wLCinit(ra, rb, r, rj, nuj, rpwj, npt; target_location) + dr = (rb - ra) / 2 + rtr = (r - (rb + ra) / 2) / dr + rjtr = (rj .- (rb + ra) / 2) ./ dr + + # Construct the Vandermonde matrix + A = transpose(Vandermonde(rjtr)) + + # Initialize variables + p = zeros(ComplexF64, npt + 1) + q = zeros(ComplexF64, npt) + c = (1 .- (-1) .^ (1:npt)) ./ (1:npt) + + # Compute the logarithmic expansion for the target point + p[1] = log(Complex(1 - rtr)) - log(Complex(-1 - rtr)) + p1 = log(Complex(1 - rtr) * (-1 - rtr)) + + # Apply logarithmic branch cut correction + if target_location == :inside + if imag(rtr) < 0 && abs(real(rtr)) < 1 + p[1] += 2im * π + p1 -= 2im * π + end + elseif target_location == :outside + if imag(rtr) > 0 && abs(real(rtr)) < 1 + p[1] -= 2im * π + p1 += 2im * π + end + end + + # Compute recurrence relation for expansion terms + for k in 1:npt + p[k + 1] = rtr * p[k] + c[k] + end + + # Construct weight correction vector q (Helsing's code) + q[1:2:(npt - 1)] = p1 .- p[2:2:npt] + q[2:2:npt] = p[1] .- p[3:2:(npt + 1)] + + q ./= (1:npt) + + # Solve for weight corrections wcorrL + wcorrL = + real(A \ q * dr .* conj(im * nuj)) ./ abs.(rpwj) .+ + (log(abs(dr)) .- log.(abs.(rj .- r))) + + wcmpC = imag(A \ -p[1:npt]) - imag(rpwj ./ (r .- rj)) + + return wcorrL, wcmpC +end + +# Constant matrix associated with interpolation using Legendre polynomials +function L_Legendre_matrix(n) + L_Leg = Matrix{Float64}(undef, n, n) + t_Leg_ref, w_Leg_ref = gausslegendre(n) + for l in 0:(n - 1) + for m in 1:n + L_Leg[l + 1, m] = (2 * l + 1) / 2 * legendrep(l, t_Leg_ref[m]) * w_Leg_ref[m] + end + end + return L_Leg +end + +# derivative of Legendre polynomials +function Legendre_derivative(l, t) + if l == 0 + return 0 + else + if t == 1 || t == -1 + error("Derivative of Legendre polynomials is not defined at t = ±1 in Legendre_derivative") + end + return l / (t^2 - 1) * (t * legendrep(l, t) - legendrep(l - 1, t)) + end +end + +# Interpolation using Legendre polynomials +function curve_interpolate(x_vec, L_leg) + n = length(x_vec) + γ_coef = L_leg * x_vec + Pₙ_vec = [t -> γ_coef[l + 1] * legendrep(l, t) for l in 0:(n - 1)] + Pₙ = t -> mapreduce(f -> f(t), +, Pₙ_vec) + return Pₙ +end + +# Derivative of the interpolation using Legendre polynomials +function curve_interpolate_derivative(x_vec, L_leg) + n = length(x_vec) + γ_coef = L_leg * x_vec + Pₙ_deriv_vec = [t -> γ_coef[l + 1] * Legendre_derivative(l, t) for l in 0:(n - 1)] + Pₙ_deriv = t -> mapreduce(f -> f(t), +, Pₙ_deriv_vec) + return Pₙ_deriv +end + +function NewtonRaphson(f, fp, x0; tol = 1.0e-8, maxIter = 1000) + x = x0 + fx = f(x0) + iter = 0 + while abs(fx) > tol && iter < maxIter + x = x - fx 
/ fp(x) + fx = f(x) + iter += 1 + end + return x +end + +# Get the n-point Gauss-Legendre quadrature nodes. +function LegendreNodes(n) + xnodes, _ = gausslegendre(n) + return xnodes +end + +# Compute the barycentric weights for polynomial interpolation on the n-point Legendre nodes. +function LegendreBaryWeights(n) + xnodes = LegendreNodes(n) + xweights = ones(n) + for j in 1:n + term_j = 1 + for i in 1:n + i == j && continue + term_j *= (xnodes[j] - xnodes[i]) + end + xweights[j] = 1 / term_j + end + return xweights +end + +# Compute the barycentric weights for polynomial interpolation on a given set of nodes. +function InterpolationBaryWeights(xnodes, n) + xweights = ones(n) + for j in 1:n + term_j = 1 + for i in 1:n + i == j && continue + term_j *= xnodes[j] - xnodes[i] + end + xweights[j] = 1 / term_j + end + return xweights +end + +# Compute the barycentric interpolation coefficients for a single target point. +function Barycentric_coef(t_target; n = 16, t_interp = nothing, w = nothing) + if t_interp === nothing + t_interp = LegendreNodes(n) + end + if w === nothing + w = LegendreBaryWeights(n) + end + + if length(t_interp) != n || length(w) != n + error("Length of t_interp and w must match n") + end + + coef = Vector{Float64}(undef, n) + + for j in 1:n + if abs(t_target - t_interp[j]) < 1.0e-15 + coef .= 0 + coef[j] = 1 + return coef + end + coef[j] = w[j] / (t_target - t_interp[j]) + end + + coef_sum = sum(coef) + coef ./= coef_sum + + return coef +end + +# scale function (from [-1,1] to [t_a,t_b]) +function scale_fn(t_a, t_b, vec_ref; node = true) + return if node == true + (t_b - t_a) / 2 .* vec_ref .+ (t_b + t_a) / 2 + else + (t_b - t_a) / 2 .* vec_ref + end +end + +# Compute the n-th harmonic number (H_n = 1 + 1/2 + ... + 1/n). +function harmonic_sum(n::Int) + val = 0 + if n == 0 + return val + else + for i in range(1, n) + val += 1 / i + end + return val + end +end + +γ = Base.MathConstants.γ # Euler-Mascheroni constant + +# kernel split for the Helmholtz equation +# single layer kernel splitting +function hankelh1_0_smooth(z; tol = 1.0e-15, n_term = 10000) + term = ComplexF64[] + push!(term, (1 + 2 * im / π * (γ - log(2))) * besselj0(z)) + push!(term, 2 * im / π * 1 / 4 * z^2) + idx = 2 + + while maximum([abs(term[idx]) / abs(term[1]), abs(term[idx])]) > tol && idx <= n_term + idx += 1 + push!(term, (-1)^idx * 2 * im / π * harmonic_sum(idx - 1)) + for i in range(1, idx - 1) + term[idx] *= (1 / 4 * z^2) / i^2 + end + end + + term_real = real(term) + term_imag = imag(term) + term_sum = sum(sort(term_real; by = abs)) + im * sum(sort(term_imag; by = abs)) + + return term_sum +end + +# double layer kernel splittings +function hankelh1_1_smooth(z; tol = 1.0e-15, n_term = 10000) + term = ComplexF64[] + push!(term, (1 - 2 * im / π * log(2)) * besselj1(z)) + push!(term, -im / (2π) * z * (digamma(1) + digamma(2))) + + idx = 2 + while maximum([abs(term[idx]) / abs(term[1]), abs(term[idx])]) > tol && idx <= n_term + idx += 1 + push!(term, (-1)^(idx - 1) * im / (2π) * z * (digamma(idx - 1) + digamma(idx))) + for i in range(1, idx - 2) + term[idx] *= (1 / 4 * z^2) / (i * (i + 1)) + end + end + + term_real = real(term) + term_imag = imag(term) + term_sum = sum(sort(term_real; by = abs)) + im * sum(sort(term_imag; by = abs)) + + return term_sum +end + +# kernel split for the modified Helmholtz equation +# single layer kernel splitting +function besselk0_smooth(z; tol = 1.0e-15, n_term = 10000) + term = ComplexF64[] + push!(term, (log(2) - γ) * besseli(0, z)) + push!(term, 1 / 4 * z^2) + idx = 
2 + + while maximum([abs(term[idx]) / abs(term[1]), abs(term[idx])]) > tol && idx <= n_term + push!(term, harmonic_sum(idx)) + for i in range(1, idx) + term[idx + 1] *= (1 / 4 * z^2) / i^2 + end + idx += 1 + end + + term_real = real(term) + term_imag = imag(term) + term_sum = sum(sort(term_real; by = abs)) + im * sum(sort(term_imag; by = abs)) + + return term_sum +end + +# double layer kernel splitting +function besselk1_smooth(z; tol = 1.0e-15, n_term = 10000) + term = ComplexF64[] + push!(term, -log(2) * besseli(1, z)) + push!(term, -1 / 4 * z * (digamma(1) + digamma(2))) + idx = 2 + + while maximum([abs(term[idx]) / abs(term[1]), abs(term[idx])]) > tol && idx <= n_term + push!(term, -1 / 4 * z * (digamma(idx) + digamma(idx + 1))) + for i in range(1, idx - 1) + term[idx + 1] *= (1 / 4 * z^2) / (i * (i + 1)) + end + idx += 1 + end + + term_real = real(term) + term_imag = imag(term) + term_sum = sum(sort(term_real; by = abs)) + im * sum(sort(term_imag; by = abs)) + + return term_sum +end + +# Find target points `X` near source elements in `Y` within `maxdist`. +function near_points_vec(X, Y::Inti.Quadrature; maxdist = 0.1) + x = [Inti.coords(q) for q in X] + y = [Inti.coords(q) for q in Y] + + kdtree = KDTree(y) + dict = Dict(j => Set{Int}() for j in 1:length(y)) + + for i in eachindex(x) + qtags = inrange(kdtree, x[i], maxdist) + for qtag in qtags + push!(dict[qtag], i) + end + end + + etype2nearlist = Dict{DataType, Vector{Vector{Int}}}() + for (E, tags) in Y.etype2qtags + nq, ne = size(tags) + nearlist = [Set{Int}() for _ in 1:ne] + + for j in 1:ne + for q in 1:nq + qtag = tags[q, j] + union!(nearlist[j], dict[qtag]) + end + end + + # Convert Sets to Vectors for the final output + etype2nearlist[E] = [collect(s) for s in nearlist] + end + + return etype2nearlist +end + +# local mesh refinement (non-recursive implementation) +function create_subdivision(z, h, α, Cε, Rε) + Δt_max = 2 * Cε / (α * h) + result = Tuple[] # To store valid subintervals + ksplit_bool = Bool[] # Store vector of bools to indicate whether kernel split is needed + stack_val = [] # Stack for iterative processing + + if abs(real(z)) >= 1 + # Add the entire interval to the stack for recursive bisection + push!(stack_val, (-1, 1, Rε, Δt_max, z)) + else + # Compute the center interval + Δt_direct = 4 * abs(imag(z)) * Rε / (Rε^2 - 1) + Δt_edge = 2 * (1 - abs(real(z))) + + Δt_c = min(Δt_edge, max(Δt_direct, Δt_max)) + + ta = real(z) - Δt_c / 2 + tb = real(z) + Δt_c / 2 + + if Δt_c <= Δt_direct + push!(result, (ta, tb)) + push!(ksplit_bool, false) + elseif Δt_c <= Δt_max + push!(result, (ta, tb)) + push!(ksplit_bool, true) + else + error("Invalid condition for kernel split") + end + + # Add the left and right subintervals to the stack + push!(stack_val, (-1, ta, Rε, Δt_max, z)) + push!(stack_val, (tb, 1, Rε, Δt_max, z)) + end + + # Process the stack iteratively using recursive_bisection logic + while !isempty(stack_val) + t1, t2, Rε, Δt_max, z = pop!(stack_val) + recursive_bisection!(t1, t2, Rε, Δt_max, z, result, ksplit_bool, stack_val) + end + + return result, ksplit_bool +end + +# Recursive bisection helper function +function recursive_bisection!(t1, t2, Rε, Δt_max, z, result, ksplit_bool, stack_val) + return if t1 < t2 + Δt_sub = t2 - t1 + z_sub = 2 * (z - t1) / Δt_sub - 1 # Transformation for preimage + + if ρ(z_sub) > Rε + push!(result, (t1, t2)) + push!(ksplit_bool, false) + return + elseif Δt_sub <= Δt_max + push!(result, (t1, t2)) + push!(ksplit_bool, true) + return + else + t_mid = t1 + Δt_sub / 2 + 
push!(stack_val, (t1, t_mid, Rε, Δt_max, z)) + push!(stack_val, (t_mid, t2, Rε, Δt_max, z)) + return + end + end +end + +# Compute the radius of Bernstein ellipse for a given complex number z +# relative to the interval [-1, 1]. +function ellip_rad(z) + return abs(z + sqrt(complex(z + 1)) * sqrt(complex(z - 1))) +end + +ρ = z -> ellip_rad(z) + +# Generate the Newton function for root finding +function newton_fn_generator(panel_interpolant, x_target)::Function + function newton_fn(t::ComplexF64) + return panel_interpolant(t) - x_target + end + return newton_fn +end + +# Generate the panel parametrization function +function panel_parametrization_fn(el) + _body = t -> SVector(el(scale_fn(0, 1, t))) + + # multiple dispatch to handle different input types + function panel_parametrization(t::Float64)::SVector{2, Float64} + return _body(t) + end + + function panel_parametrization(t) + return _body(t) + end + + return panel_parametrization +end + +""" + adaptive_refinement( + source_panel, + source_el, + L_Leg, + α, + Cε, + Rε, + target; + affine_preimage::Bool=true, + ) + +Implements the per-target adaptive refinement algorithm from Fryklund et al. (2022) +to ensure accurate kernel-split quadrature for parameter-dependent PDEs (e.g., +modified Helmholtz) where the parameter `α` is large. + +For each target point, the algorithm computes its complex preimage `z` on the +source panel's canonical interval `[-1, 1]`. It then generates a list of +subintervals, `(t_a, t_b)`, that are "acceptable" for quadrature. + +The algorithm (from `create_subdivision` and `recursive_bisection!`) +iteratively bisects the interval `[-1, 1]` until every resulting subinterval +`(t1, t2)` is "acceptable". An interval is deemed acceptable if it meets one +of two stopping conditions: + +1. Well-Separated (Standard Quadrature): The interval is sufficiently + far from the target's preimage `z`, relative to its own length. + - Condition: `ρ(z_sub) > Rε` + - Action: Add `(t1, t2)` to the list. Mark as `ksplit_bool = false`. + +2. Sufficiently Small (Kernel-Split Quadrature): The interval is + close to `z` (`ρ(z_sub) <= Rε`), but it is small enough for the + kernel-split quadrature to be accurate given the large `α` parameter. + - Condition: `Δt_sub <= Δt_max` (where `Δt_max = 2*Cε / (α*h)`) + - Action: Add `(t1, t2)` to the list. Mark as `ksplit_bool = true`. + +If an interval is both close and too large, it is bisected, and its +children are checked again. + +This process ensures that the smooth coefficient functions (like `G^L`) of the +kernel-split are accurately resolved by polynomials on each subpanel, +preventing the accuracy degradation that occurs when `α*h` is large. + +See also [`fryklund2022adaptive`](@ref). + +# Arguments +- `source_panel`: The `Quadrature` object for the source panel. Used to compute + the panel's total arc length `h`. +- `source_el`: The parametrization function for the source panel. +- `L_Leg`: Precomputed Legendre interpolation matrix, used only if + `affine_preimage=false`. +- `α`: The PDE parameter (e.g., `λ` in `(Δ - λ²)u = 0`). +- `Cε`: The accuracy constant used to determine the max subinterval length + `Δt_max` (from Eq. 75). +- `Rε`: The cutoff radius for the near-field criterion (from Eq. 72). +- `target`: A vector of target points to generate subdivisions for. +- `affine_preimage`: + - `true` (default): Use a fast, direct affine map to find the preimage `z`. + - `false`: Use the affine map as an initial guess, then refine `z` with + Newton's method on a polynomial panel interpolant. 
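+
+As a concrete illustration (the numbers are chosen for this example only):
+with `Cε = 3.5`, `α = 30π ≈ 94.2`, and a panel of arc length `h = 0.1`, the
+criterion gives `Δt_max = 2 * 3.5 / (30π * 0.1) ≈ 0.74`, so any subinterval of
+the canonical domain `[-1, 1]` that is close to the target's preimage and
+longer than roughly `0.74` is bisected before kernel-split weights are built.
+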
+ +# Returns +- `panel_subdivision::Vector{Vector{Tuple}}`: A list where `panel_subdivision[j]` + is the vector of `(t_start, t_end)` subintervals on `[-1, 1]` for `target[j]`. +- `ksplit_bool::Vector{Vector{Bool}}`: A list where `ksplit_bool[j][k]` is `true` + if the `k`-th subinterval for `target[j]` requires kernel-split quadrature. +""" +function adaptive_refinement( + source_panel, + source_el, + L_Leg, + α, + Cε, + Rε, + target; + affine_preimage::Bool = true, + ) + # Convert target points to complex numbers + nᵢ = length(target) + target_complex = Vector{ComplexF64}(undef, nᵢ) + for i in axes(target)[1] + x = Inti.coords(target[i]) + target_complex[i] = x[1] + im * x[2] + end + + # Get panel endpoints (needed for both affine and interpolant methods) + panel_parametrization = t -> source_el(scale_fn(0, 1, t)) + z_a_complex = panel_parametrization(-1)[1] + im * panel_parametrization(-1)[2] + z_b_complex = panel_parametrization(1)[1] + im * panel_parametrization(1)[2] + + # Pre-calculate affine map components for efficiency + z_mid = (z_a_complex + z_b_complex) / 2 + z_diff = (z_b_complex - z_a_complex) + + panel_arc_length = sum(q -> Inti.weight(q), source_panel) + panel_subdivision = Vector{Tuple}[] + ksplit_bool = Vector{Bool}[] + + # Construct interpolant if needed + local panel_interpolant, panel_interpolant_deriv + if affine_preimage == false + # This setup is only performed if the interpolant method is used. + qnodes_i = [Inti.coords(qnodes) for qnodes in source_panel] + qnodes_i_complex = [nodes[1] + im * nodes[2] for nodes in qnodes_i] + + panel_interpolant = curve_interpolate(qnodes_i_complex, L_Leg) + panel_interpolant_deriv = curve_interpolate_derivative(qnodes_i_complex, L_Leg) + end + + # Loop over target points to compute preimages and subdivisions + for j in axes(target_complex)[1] + x_target = target_complex[j] + local z_approx # Ensure z_approx is in the loop's scope + + if affine_preimage == false + # Calculate the robust initial guess (affine inverse) + initial_guess = 2 * (x_target - z_mid) / z_diff + + # Refine the guess using the polynomial interpolant + newton_nonlinear_fn = newton_fn_generator(panel_interpolant, x_target) + z_approx = NewtonRaphson( + newton_nonlinear_fn, + panel_interpolant_deriv, + initial_guess; + tol = 1.0e-14, + ) + else + # Default: Use Affine Inverse Directly + z_approx = 2 * (x_target - z_mid) / z_diff + end + + # Subdivide the panel based on the preimage + panel_subdivision_temp_j, ksplit_bool_temp_j = + create_subdivision(z_approx, panel_arc_length, α, Cε, Rε) + + push!(panel_subdivision, panel_subdivision_temp_j) + push!(ksplit_bool, ksplit_bool_temp_j) + end + + return panel_subdivision, ksplit_bool +end + +function log_punctured(x, y) + d = norm(x - y) + d ≤ Inti.SAME_POINT_TOLERANCE && return 0 + return log(d) +end + +""" + build_neighbor_information(source_el, n_el; tol=1e-12) + +Builds connectivity maps for a set of panels (elements). + +# Returns +- `panel_to_nodes_map`: A vector where the `i`-th element is a tuple `(start_node_id, end_node_id)` for the `i`-th panel. +- `node_to_panel_map`: A vector of vectors where the `j`-th element is a list of panel indices connected to the `j`-th unique node. 
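+
+# Example
+
+A small sketch on a hypothetical two-panel closed curve (the unit circle split
+at `(1, 0)` and `(-1, 0)`); the panel parametrizations are illustrative only:
+
+```julia
+using StaticArrays
+
+els = [
+    s -> SVector(cos(π * s), sin(π * s)),             # upper half, s ∈ [0, 1]
+    s -> SVector(cos(π * (1 + s)), sin(π * (1 + s))), # lower half
+]
+p2n, n2p = build_neighbor_information(els, 2)
+# p2n == [(1, 2), (2, 1)]: panel 1 runs from node 1 to node 2, panel 2 back
+# n2p == [[1, 2], [1, 2]]: each node connects exactly two panels
+```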
+""" +function build_neighbor_information(source_el, n_el; tol = 1.0e-12) + # Collect all unique nodes and map panel endpoints to unique node IDs + all_nodes = Vector{typeof(source_el[1](0))}() + panel_to_nodes_map = Vector{Tuple{Int, Int}}(undef, n_el) + + for i in 1:n_el + start_node = source_el[i](0) + end_node = source_el[i](1) + + start_id = findfirst(n -> norm(n - start_node) < tol, all_nodes) + if isnothing(start_id) + push!(all_nodes, start_node) + start_id = length(all_nodes) + end + + end_id = findfirst(n -> norm(n - end_node) < tol, all_nodes) + if isnothing(end_id) + push!(all_nodes, end_node) + end_id = length(all_nodes) + end + + panel_to_nodes_map[i] = (start_id, end_id) + end + + # Build a map from each unique node ID to the panels it connects + n_unique_nodes = length(all_nodes) + node_to_panel_map = [Vector{Int}() for _ in 1:n_unique_nodes] + + for i in 1:n_el + start_id, end_id = panel_to_nodes_map[i] + push!(node_to_panel_map[start_id], i) + push!(node_to_panel_map[end_id], i) + end + + for (node_id, panels) in enumerate(node_to_panel_map) + if length(panels) != 2 + @warn "Node ID $node_id connects $(length(panels)) panels. Expected 2 for a simple closed curve." + end + end + + return panel_to_nodes_map, node_to_panel_map +end + +""" + _get_ksplit_kernels(op, layer_type, T) + +Returns a NamedTuple (G_S, G_L, G_C) containing the smooth part, +logarithmic singularity, and Cauchy singularity of the kernel split for a +given operator, layer type, and operator element type `T`. The formula +is given as follows: + +G(x, y) = G_S(x, y) + G_L(x, y) log|x - y| + G_C(x, y) frac{(x - y) ⋅ n(y)}{|x - y|^2} + +""" +function _get_ksplit_kernels(op, layer_type, T) + if op isa Inti.Laplace{2} + if layer_type == :single + G_S = (x, y) -> 0 + G_L = (x, y) -> -1 / (2π) + G_C = (x, y) -> 0 + elseif layer_type == :double + G_S = (x, y) -> 0 + G_L = (x, y) -> 0 + G_C = (x, y) -> 1 / (2π) + else + error("Unsupported layer type for Laplace operator") + end + elseif op isa Inti.Helmholtz{2, Float64} + k = op.k + if layer_type == :single + G_S = + (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + im / 4 * hankelh1_0_smooth(k * r) - 1 / (2π) * besselj0(k * r) * log(k) + end + G_L = (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + -1 / (2π) * besselj0(k * r) + end + G_C = (x, y) -> 0 + elseif layer_type == :double + G_S = + (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + ny = Inti.normal(y) + (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T) + im * k / 4 * + (hankelh1_1_smooth(k * r) + 2 * im / π * besselj1(k * r) * log(k)) * + dot(x_coor - y_coor, ny) / r + end + G_L = (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + ny = Inti.normal(y) + (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T) + -k / (2π) * besselj1(k * r) * dot(x_coor - y_coor, ny) / r + end + G_C = (x, y) -> 1 / (2π) + else + error("Unsupported layer type for Helmholtz operator") + end + elseif op isa Inti.Yukawa{2, Float64} + λ = op.λ + if layer_type == :single + G_S = + (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + 1 / (2π) * besselk0_smooth(λ * r) - + 1 / (2π) * besseli(0, λ * r) * log(λ) + end + G_L = (x, y) -> begin + x_coor = Inti.coords(x) + y_coor = Inti.coords(y) + r = norm(x_coor - y_coor) + -1 / (2π) * besseli(0, λ * r) + end + G_C = (x, y) -> 0 + elseif layer_type == :double + 
+"""
+    _get_ksplit_kernels(op, layer_type, T)
+
+Returns a named tuple `(G_S, G_L, G_C)` containing the smooth part, the
+logarithmic-singularity coefficient, and the Cauchy-singularity coefficient
+of the kernel split for a given operator, layer type, and operator element
+type `T`, so that
+
+    G(x, y) = G_S(x, y) + G_L(x, y) log|x - y| + G_C(x, y) ((x - y) ⋅ n(y)) / |x - y|²
+"""
+function _get_ksplit_kernels(op, layer_type, T)
+    if op isa Inti.Laplace{2}
+        if layer_type == :single
+            G_S = (x, y) -> 0
+            G_L = (x, y) -> -1 / (2π)
+            G_C = (x, y) -> 0
+        elseif layer_type == :double
+            G_S = (x, y) -> 0
+            G_L = (x, y) -> 0
+            G_C = (x, y) -> 1 / (2π)
+        else
+            error("Unsupported layer type for Laplace operator")
+        end
+    elseif op isa Inti.Helmholtz{2, Float64}
+        k = op.k
+        if layer_type == :single
+            G_S = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                im / 4 * hankelh1_0_smooth(k * r) -
+                1 / (2π) * besselj0(k * r) * log(k)
+            end
+            G_L = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                -1 / (2π) * besselj0(k * r)
+            end
+            G_C = (x, y) -> 0
+        elseif layer_type == :double
+            G_S = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                ny = Inti.normal(y)
+                (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T)
+                im * k / 4 *
+                (hankelh1_1_smooth(k * r) + 2 * im / π * besselj1(k * r) * log(k)) *
+                dot(x_coor - y_coor, ny) / r
+            end
+            G_L = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                ny = Inti.normal(y)
+                (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T)
+                -k / (2π) * besselj1(k * r) * dot(x_coor - y_coor, ny) / r
+            end
+            G_C = (x, y) -> 1 / (2π)
+        else
+            error("Unsupported layer type for Helmholtz operator")
+        end
+    elseif op isa Inti.Yukawa{2, Float64}
+        λ = op.λ
+        if layer_type == :single
+            G_S = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                1 / (2π) * besselk0_smooth(λ * r) -
+                1 / (2π) * besseli(0, λ * r) * log(λ)
+            end
+            G_L = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                -1 / (2π) * besseli(0, λ * r)
+            end
+            G_C = (x, y) -> 0
+        elseif layer_type == :double
+            G_S = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                ny = Inti.normal(y)
+                (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T)
+                λ / (2π) *
+                (besselk1_smooth(λ * r) + besseli(1, λ * r) * log(λ)) *
+                dot(x_coor - y_coor, ny) / r
+            end
+            G_L = (x, y) -> begin
+                x_coor = Inti.coords(x)
+                y_coor = Inti.coords(y)
+                r = norm(x_coor - y_coor)
+                ny = Inti.normal(y)
+                (r ≤ Inti.SAME_POINT_TOLERANCE) && return zero(T)
+                λ / (2π) * besseli(1, λ * r) * dot(x_coor - y_coor, ny) / r
+            end
+            G_C = (x, y) -> 1 / (2π)
+        else
+            error("Unsupported layer type for Yukawa operator")
+        end
+    else
+        error("Operator type not supported")
+    end
+
+    return (G_S = G_S, G_L = G_L, G_C = G_C)
+end
diff --git a/src/utils.jl b/src/utils.jl
index 355f8d22..763c975a 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -198,7 +198,7 @@ function _normalize_compression(compression, target, source)
 end
 
 function _normalize_correction(correction, target, source)
-    methods = (:dim, :adaptive, :none)
+    methods = (:dim, :adaptive, :none, :ksplit, :ksplit_adaptive)
     # check that method is valid
     correction.method ∈ methods ||
         error("Unknown correction.method $(correction.method). Available options: $methods")
diff --git a/test/kernel_split_test.jl b/test/kernel_split_test.jl
new file mode 100644
index 00000000..ad3264c5
--- /dev/null
+++ b/test/kernel_split_test.jl
@@ -0,0 +1,220 @@
+using Inti
+using Gmsh
+using SpecialFunctions
+using StaticArrays
+using HMatrices
+using IterativeSolvers
+using FastGaussQuadrature
+using LinearAlgebra
+using LinearMaps
+using Meshes
+# using GLMakie
+
+# 1. PARAMETER AND GEOMETRY SETUP
+println("Setting up test parameters and geometry...")
+
+# PDE and discretization parameters
+λ = 30.0 * π
+op = Inti.Yukawa(; dim = 2, λ)
+meshsize = 0.1
+n_quad_pts = 16
+hmatrix_tol = 1.0e-12
+gmres_tol = 1.0e-14
+
+# Helper function for the boundary inverse (angle normalized to [0, 2π))
+angle_mod = x -> mod(angle(x), 2π)
+
+# Define the starfish geometry
+r₀ = 1.0 # mean radius
+a = 0.3  # amplitude of the wobble
+ω = 5    # frequency of the wobble
+θ₀ = 0.2 # rotation angle of the starfish shape
+starfish_rad = (θ) -> r₀ + a * cos(ω * (2π * θ - θ₀))        # radius function
+starfish_rad_p = (θ) -> -a * 2π * ω * sin(ω * (2π * θ - θ₀)) # derivative of the radius function
+starfish_v = (θ) -> (starfish_rad_p(θ) + im * 2π * starfish_rad(θ)) * exp(im * 2π * θ) # velocity function
+starfish_s = (θ) -> norm(starfish_v(θ))   # speed function
+starfish_v_x = (θ) -> real(starfish_v(θ)) # x-component of the velocity
+starfish_v_y = (θ) -> imag(starfish_v(θ)) # y-component of the velocity
+
+starfish_rad_pp = (θ) -> -a * (4π^2) * ω^2 * cos(ω * (2π * θ - θ₀)) # second derivative of the radius function
+# derivative of the velocity function
+starfish_vp =
+    (θ) ->
+        (starfish_rad_pp(θ) + 2 * im * 2π * starfish_rad_p(θ) - (4π^2) * starfish_rad(θ)) *
+        exp(im * 2π * θ)
+starfish_vp_x = (θ) -> real(starfish_vp(θ)) # x-component of the velocity derivative
+starfish_vp_y = (θ) -> imag(starfish_vp(θ)) # y-component of the velocity derivative
+
+# curvature function
+function starfish_κ(θ)
+    return (starfish_v_x(θ) * starfish_vp_y(θ) - starfish_v_y(θ) * starfish_vp_x(θ)) /
+           (starfish_s(θ)^3)
+end
+
+# Set the generic function names used by the correction settings
+v = θ -> starfish_v(θ)
+s = θ -> starfish_s(θ)
+κ = θ -> starfish_κ(θ)
+boundary_inv = (θ) -> angle_mod(θ[1] + im * θ[2]) / (2π)
+function starfish_coor(θ)
+    r = starfish_rad(θ)
+    return SVector(r * cos(2π * θ), r * sin(2π * θ))
+end
+
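+# Sanity check of the analytic speed (our addition, not part of the solver
+# test): compare against a centered finite difference of the parametrization.
+let δ = 1.0e-6, θ = 0.3
+    fd_speed = norm(starfish_coor(θ + δ) - starfish_coor(θ - δ)) / (2δ)
+    @assert abs(fd_speed - starfish_s(θ)) < 1.0e-6 * starfish_s(θ)
+end
+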
+# Generate and curve the mesh
+gmsh.initialize()
+gmsh.option.setNumber("Mesh.MeshSizeMax", meshsize)
+gmsh.option.setNumber("Mesh.MeshSizeMin", meshsize)
+
+bnd1 = Inti.gmsh_curve(starfish_coor, 0, 1; meshsize)
+cl = gmsh.model.occ.addCurveLoop([bnd1])
+disk = gmsh.model.occ.addPlaneSurface([cl])
+gmsh.model.occ.synchronize()
+gmsh.model.mesh.generate(2)
+msh = Inti.import_mesh(; dim = 2)
+gmsh.finalize()
+Ω = Inti.Domain(Inti.entities(msh)) do ent
+    return Inti.geometric_dimension(ent) == 2
+end
+
+θ_smooth = 5 # smoothness order of the curved elements
+crvmsh = Inti.curve_mesh(msh, starfish_coor, θ_smooth)
+# viz(crvmsh[Ω], showsegments = true, figure = (; size = (425, 400))) # optional visualization (needs GLMakie)
+
+# Define quadratures
+Ω_crv_quad = Inti.Quadrature(crvmsh[Ω]; qorder = 10)
+Γ = Inti.external_boundary(Ω)
+Γ_crv_el = collect(Inti.elements(crvmsh[Γ]))
+Γ_crv_quad = Inti.Quadrature(crvmsh[Γ], Inti.GaussLegendre(n_quad_pts))
+Γ_crv_quad_connectivity = Inti.etype2qtags(Γ_crv_quad, first(Inti.element_types(crvmsh[Γ])))
+n_el = length(Γ_crv_el)
+@info "Geometry setup complete. n_elements = $n_el, n_quad_pts = $n_quad_pts"
+
+# Define the exact solution (scaled by 1e12 for numerical convenience, since
+# K₀ decays exponentially)
+uₑₓ = (x) -> 1.0e12 * besselk(0, λ * norm((x .- (1.5, 0)), 2))
+g = uₑₓ
+
+g_vec = map(q -> g(Inti.coords(q)), Γ_crv_quad)
+u_sol_ex = map(q -> uₑₓ(Inti.coords(q)), Ω_crv_quad)
+
+# 2. TEST 1: STANDARD KERNEL SPLIT
+println("\n" * "="^30)
+println("Running Test 1: Standard Kernel Split")
+println("="^30)
+
+# B2B (boundary-to-boundary) operator, standard kernel split
+ksplit_b2b_settings = (
+    method = :ksplit,
+    connectivity = Γ_crv_quad_connectivity,
+    elements = Γ_crv_el,
+    velocity_fn = v,
+    curvature_fn = κ,
+    boundary_inv = boundary_inv,
+    PARAMETRIC_LENGTH = 1.0,
+    n_panel_corr = 3,
+)
+
+@time S_b2b_ks, D_b2b_ks = Inti.single_double_layer(;
+    op = op,
+    target = Γ_crv_quad,
+    source = Γ_crv_quad,
+    compression = (method = :hmatrix, tol = hmatrix_tol),
+    correction = ksplit_b2b_settings,
+)
+
+# B2D (boundary-to-domain) operator, standard kernel split
+ksplit_b2d_settings = (
+    method = :ksplit,
+    connectivity = Γ_crv_quad_connectivity,
+    elements = Γ_crv_el,
+    velocity_fn = v,
+    curvature_fn = κ,
+    boundary_inv = boundary_inv,
+    PARAMETRIC_LENGTH = 1.0,
+    maxdist = 1.1 * meshsize,
+    target_location = :inside,
+    # affine_preimage = false, # default is true
+)
+
+@time S_b2d_ks, D_b2d_ks = Inti.single_double_layer(;
+    op = op,
+    target = Ω_crv_quad,
+    source = Γ_crv_quad,
+    compression = (method = :hmatrix, tol = hmatrix_tol),
+    correction = ksplit_b2d_settings,
+)
+
+# Solve and check the error (standard kernel split)
+L_b2b_ks = -I / 2 + D_b2b_ks
+L_b2d_ks = D_b2d_ks
+
+println("Solving system (Standard Kernel Split)...")
+σ_ks = gmres(
+    L_b2b_ks,
+    g_vec;
+    reltol = gmres_tol,
+    abstol = gmres_tol,
+    verbose = true,
+    restart = 1000,
+)
+u_sol_ks = L_b2d_ks * σ_ks
+
+er_ks = u_sol_ks - u_sol_ex
+er_norm_ks = abs.(er_ks) ./ maximum(abs.(u_sol_ex))
+println("KSplit Max Relative Error: ", norm(er_norm_ks, Inf))
+
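+# For small problems the same correction settings can in principle be paired
+# with dense assembly instead of H-matrix compression; sketch only, we have
+# not verified this combination here:
+#
+#     S_dense, D_dense = Inti.single_double_layer(;
+#         op = op,
+#         target = Γ_crv_quad,
+#         source = Γ_crv_quad,
+#         compression = (method = :none,),
+#         correction = ksplit_b2b_settings,
+#     )
+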
+# 3. TEST 2: ADAPTIVE KERNEL SPLIT
+println("\n" * "="^30)
+println("Running Test 2: Adaptive Kernel Split")
+println("="^30)
+
+# B2B (boundary-to-boundary) operator, adaptive kernel split
+ksplit_adaptive_b2b_settings = (
+    method = :ksplit_adaptive,
+    connectivity = Γ_crv_quad_connectivity,
+    elements = Γ_crv_el,
+    velocity_fn = v,
+    curvature_fn = κ,
+    boundary_inv = boundary_inv,
+    PARAMETRIC_LENGTH = 1.0,
+    n_panel_corr = 3,
+    Cε = 3.5,
+    Rε = 3.7,
+    target_location = :inside,
+    # affine_preimage = false, # default is true
+)
+
+@time S_b2b_aks, D_b2b_aks = Inti.single_double_layer(;
+    op = op,
+    target = Γ_crv_quad,
+    source = Γ_crv_quad,
+    compression = (method = :hmatrix, tol = hmatrix_tol),
+    correction = ksplit_adaptive_b2b_settings,
+)
+
+# B2D (boundary-to-domain) operator, adaptive kernel split
+ksplit_adaptive_b2d_settings = (
+    method = :ksplit_adaptive,
+    connectivity = Γ_crv_quad_connectivity,
+    elements = Γ_crv_el,
+    velocity_fn = v,
+    curvature_fn = κ,
+    boundary_inv = boundary_inv,
+    PARAMETRIC_LENGTH = 1.0,
+    maxdist = 1.1 * meshsize,
+    Cε = 3.5,
+    Rε = 3.7,
+    target_location = :inside,
+    # affine_preimage = false, # default is true
+)
+
+@time S_b2d_aks, D_b2d_aks = Inti.single_double_layer(;
+    op = op,
+    target = Ω_crv_quad,
+    source = Γ_crv_quad,
+    compression = (method = :hmatrix, tol = hmatrix_tol),
+    correction = ksplit_adaptive_b2d_settings,
+)
+
+# Solve and check the error (adaptive kernel split)
+L_b2b_aks = -I / 2 + D_b2b_aks
+L_b2d_aks = D_b2d_aks
+
+println("Solving system (Adaptive Kernel Split)...")
+σ_aks = gmres(
+    L_b2b_aks,
+    g_vec;
+    reltol = gmres_tol,
+    abstol = gmres_tol,
+    verbose = true,
+    restart = 1000,
+)
+u_sol_aks = L_b2d_aks * σ_aks
+
+er_aks = u_sol_aks - u_sol_ex
+er_norm_aks = abs.(er_aks) ./ maximum(abs.(u_sol_ex))
+println("Adaptive KSplit Max Relative Error: ", norm(er_norm_aks, Inf))
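+
+# Side-by-side summary (our addition): both variants should reach comparable
+# accuracy on this problem; the adaptive split is designed to retain its
+# accuracy as λ (and hence λ * h) grows.
+println("\nSummary of max relative errors:")
+println("  ksplit          : ", norm(er_norm_ks, Inf))
+println("  ksplit_adaptive : ", norm(er_norm_aks, Inf))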