From c5384a2603baa339600ab6d10285bb30d2de2cb3 Mon Sep 17 00:00:00 2001
From: arduano
Date: Mon, 23 Mar 2026 00:24:50 +1100
Subject: [PATCH] test: add f64 inverse-trig boundary adversarial coverage

---
 .../simd_math_targeted_edges/inverse_trig.rs  | 87 +++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/src/tests/simd_math_targeted_edges/inverse_trig.rs b/src/tests/simd_math_targeted_edges/inverse_trig.rs
index 36e6a91..24accd1 100644
--- a/src/tests/simd_math_targeted_edges/inverse_trig.rs
+++ b/src/tests/simd_math_targeted_edges/inverse_trig.rs
@@ -280,6 +280,85 @@ fn run_f64_inverse_trig_identity() {
 const TAN_PI_8_F64: f64 = 0.414_213_562_373_095_03;
 const TAN_3PI_8_F64: f64 = 2.414_213_562_373_095;
 
+fn run_f64_inverse_trig_fallback_boundary() {
+    // Boundary used by portable_f64 fallback patching for asin/acos near |x|≈1.
+    let boundary = f64::from_bits(0x3FEF_F000_0000_0000);
+    let inputs = [
+        f64::from_bits(boundary.to_bits() - 2),
+        f64::from_bits(boundary.to_bits() - 1),
+        boundary,
+        f64::from_bits(boundary.to_bits() + 1),
+        f64::from_bits(boundary.to_bits() + 2),
+        -f64::from_bits(boundary.to_bits() - 2),
+        -f64::from_bits(boundary.to_bits() - 1),
+        -boundary,
+        -f64::from_bits(boundary.to_bits() + 1),
+        -f64::from_bits(boundary.to_bits() + 2),
+    ];
+
+    for chunk in inputs.chunks(S::Vf64::WIDTH) {
+        let v = S::Vf64::load_from_slice(chunk);
+        let asin = v.asin_u35();
+        let acos = v.acos_u35();
+
+        for (lane, &x) in chunk.iter().enumerate() {
+            assert_f64_contract(
+                "asin_u35 boundary",
+                x,
+                asin[lane],
+                x.asin(),
+                contracts::ASIN_U35_F64_MAX_ULP,
+            )
+            .unwrap_or_else(|e| panic!("{e}"));
+            assert_f64_contract(
+                "acos_u35 boundary",
+                x,
+                acos[lane],
+                x.acos(),
+                contracts::ACOS_U35_F64_MAX_ULP,
+            )
+            .unwrap_or_else(|e| panic!("{e}"));
+        }
+    }
+}
+
+fn run_f64_atan_reduction_threshold_neighbors() {
+    // Thresholds used by atan range reduction in portable_f64. 
+    let t1 = TAN_PI_8_F64;
+    let t2 = TAN_3PI_8_F64;
+    let inputs = [
+        f64::from_bits(t1.to_bits() - 2),
+        f64::from_bits(t1.to_bits() - 1),
+        t1,
+        f64::from_bits(t1.to_bits() + 1),
+        f64::from_bits(t1.to_bits() + 2),
+        f64::from_bits(t2.to_bits() - 2),
+        f64::from_bits(t2.to_bits() - 1),
+        t2,
+        f64::from_bits(t2.to_bits() + 1),
+        f64::from_bits(t2.to_bits() + 2),
+    ];
+
+    for chunk in inputs.chunks(S::Vf64::WIDTH) {
+        let v = S::Vf64::load_from_slice(chunk);
+        let atan = v.atan_u35();
+        let atan_neg = (-v).atan_u35();
+
+        for (lane, &x) in chunk.iter().enumerate() {
+            assert_f64_contract(
+                "atan_u35 threshold",
+                x,
+                atan[lane],
+                x.atan(),
+                contracts::ATAN_U35_F64_MAX_ULP,
+            )
+            .unwrap_or_else(|e| panic!("{e}"));
+            assert_f64_contract("atan_u35 threshold odd", -x, atan_neg[lane], -atan[lane], 2)
+                .unwrap_or_else(|e| panic!("{e}"));
+        }
+    }
+}
+
 simd_math_targeted_all_backends!(f64_inverse_trig_near_one, run_f64_inverse_trig_near_one);
 simd_math_targeted_all_backends!(
     f64_inverse_trig_special_lanes,
@@ -287,3 +366,11 @@ simd_math_targeted_all_backends!(
 );
 simd_math_targeted_all_backends!(f64_inverse_trig_symmetry, run_f64_inverse_trig_symmetry);
 simd_math_targeted_all_backends!(f64_inverse_trig_identity, run_f64_inverse_trig_identity);
+simd_math_targeted_all_backends!(
+    f64_inverse_trig_fallback_boundary,
+    run_f64_inverse_trig_fallback_boundary
+);
+simd_math_targeted_all_backends!(
+    f64_atan_reduction_threshold_neighbors,
+    run_f64_atan_reduction_threshold_neighbors
+);