diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index f4b9b1c30251e..0a71fa5a2f92d 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -13,6 +13,11 @@ allow(aarch64_softfloat_neon) )] +#[unstable(feature = "stdarch_aarch64_sve", issue = "99999999")] +pub mod sve; +#[unstable(feature = "stdarch_aarch64_sve", issue = "99999999")] +pub use self::sve::*; + mod mte; #[unstable(feature = "stdarch_aarch64_mte", issue = "129010")] pub use self::mte::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs new file mode 100755 index 0000000000000..14376777b2d43 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -0,0 +1,427 @@ +#![allow(unused_unsafe)] + +// ============================================================================ +// Module Declarations +// ============================================================================ + +mod sve; +mod sve2; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub mod types; + +use types::*; + +// ============================================================================ +// Type Conversion Utilities +// ============================================================================ +// +// These utility functions provide low-level type conversion capabilities +// for SVE types. They use transmute_copy for bit-level reinterpretation +// to avoid triggering E0511 errors with non-SIMD types. + +/// Bit-level reinterpretation for SVE types. +/// +/// This function performs a pure bit-level reinterpretation. SVE wrapper types +/// are treated as opaque at this level to avoid triggering E0511 errors. +#[inline] +#[target_feature(enable = "sve")] +pub(crate) unsafe fn simd_reinterpret<T, U>(x: T) -> U { + core::mem::transmute_copy::<T, U>(&x) +} + +/// Type casting for SVE types. +/// +/// Most SVE "casts" in stdarch are just layout-identical reinterpretations. +/// For actual value-semantic conversions, use the corresponding LLVM SVE convert +/// intrinsics in the specific API implementations. +#[inline] +#[target_feature(enable = "sve")] +pub(crate) unsafe fn simd_cast<T, U>(x: T) -> U { + core::mem::transmute_copy::<T, U>(&x) +} + +// ============================================================================ +// SVE Select Operation (Predicated Selection) +// ============================================================================ +// +// SVE's predicated selection uses LLVM's aarch64.sve.sel.* intrinsics. +// The intrinsic names correspond to element types/widths: +// - nxv16i8, nxv8i16, nxv4i32, nxv2i64 (integers) +// - nxv4f32, nxv2f64 (floats) +// - nxv16i1 (predicates) +// +// This approach avoids feeding non-SIMD types to simd_select, which would +// trigger E0511 errors. + +/// Trait for static dispatch of SVE select operations to LLVM intrinsics.
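+///
+/// A minimal usage sketch (the element type below is only an example; any
+/// implementor is dispatched the same way through `simd_select`):
+///
+/// ```ignore
+/// // Resolves to the svfloat32_t impl, i.e. llvm.aarch64.sve.sel.nxv4f32.
+/// let r: svfloat32_t = unsafe { simd_select(pg, a, b) };
+/// ```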
+pub(crate) trait __SveSelect { + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self; +} + +// LLVM intrinsic declarations for SVE select operations +unsafe extern "C" { + #[link_name = "llvm.aarch64.sve.sel.nxv16i8"] + fn __llvm_sve_sel_nxv16i8(mask: svbool_t, a: svint8_t, b: svint8_t) -> svint8_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv8i16"] + fn __llvm_sve_sel_nxv8i16(mask: svbool_t, a: svint16_t, b: svint16_t) -> svint16_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv4i32"] + fn __llvm_sve_sel_nxv4i32(mask: svbool_t, a: svint32_t, b: svint32_t) -> svint32_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv2i64"] + fn __llvm_sve_sel_nxv2i64(mask: svbool_t, a: svint64_t, b: svint64_t) -> svint64_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv4f32"] + fn __llvm_sve_sel_nxv4f32(mask: svbool_t, a: svfloat32_t, b: svfloat32_t) -> svfloat32_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv2f64"] + fn __llvm_sve_sel_nxv2f64(mask: svbool_t, a: svfloat64_t, b: svfloat64_t) -> svfloat64_t; + + #[link_name = "llvm.aarch64.sve.sel.nxv16i1"] + fn __llvm_sve_sel_nxv16i1(mask: svbool_t, a: svbool_t, b: svbool_t) -> svbool_t; +} + +// Signed integer type implementations +impl __SveSelect for svint8_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv16i8(mask, a, b) + } +} + +impl __SveSelect for svint16_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv8i16(mask, a, b) + } +} + +impl __SveSelect for svint32_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv4i32(mask, a, b) + } +} + +impl __SveSelect for svint64_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv2i64(mask, a, b) + } +} + +// Unsigned integer type implementations +// Note: svuint*_t and svint*_t share the same LLVM intrinsic at the same width +// since they have identical layouts in LLVM. 
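+// Each unsigned impl below therefore follows the same pattern: transmute the
+// operands to the signed layout, call the shared intrinsic, and transmute the
+// result back, e.g. for the 8-bit case:
+//
+//     transmute(__llvm_sve_sel_nxv16i8(mask, transmute(a), transmute(b)))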
+impl __SveSelect for svuint8_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + core::mem::transmute(__llvm_sve_sel_nxv16i8( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) + } +} + +impl __SveSelect for svuint16_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + core::mem::transmute(__llvm_sve_sel_nxv8i16( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) + } +} + +impl __SveSelect for svuint32_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + core::mem::transmute(__llvm_sve_sel_nxv4i32( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) + } +} + +impl __SveSelect for svuint64_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + core::mem::transmute(__llvm_sve_sel_nxv2i64( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) + } +} + +// Floating-point type implementations +impl __SveSelect for svfloat32_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv4f32(mask, a, b) + } +} + +impl __SveSelect for svfloat64_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv2f64(mask, a, b) + } +} + +// Predicate type implementation (1-bit predicate vector, nxv16i1) +impl __SveSelect for svbool_t { + #[inline(always)] + unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { + __llvm_sve_sel_nxv16i1(mask, a, b) + } +} + +// TODO: If f16/bf16/mfloat8 are supported in types.rs, add implementations: +// impl __SveSelect for svfloat16_t { ... } +// impl __SveSelect for svbfloat16_t { ... } +// impl __SveSelect for svmfloat8_t { ... } + +// ============================================================================ +// Public Select API +// ============================================================================ +// +// This is the public entry point for select operations, maintaining the +// original function signature (called by sve/*.rs). It now uses trait-based +// static dispatch to LLVM SVE `sel` intrinsics instead of the generic +// `simd_select` compiler intrinsic. + +#[inline] +#[target_feature(enable = "sve")] +pub(crate) unsafe fn simd_select<M, T>(m: M, a: T, b: T) -> T +where + M: Into<svbool_t>, + T: __SveSelect, +{ + let mask: svbool_t = m.into(); + <T as __SveSelect>::sel(mask, a, b) +} + +// ============================================================================ +// Scalar and Pointer Type Conversion Traits +// ============================================================================ +// +// These traits provide conversion capabilities for scalar types and pointers +// used in SVE API implementations. They enable seamless conversion between +// signed and unsigned representations. + +/// Trait for converting between signed and unsigned scalar types.
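+///
+/// The conversions are plain `as` casts, so they preserve the bit pattern
+/// rather than the numeric value; a small illustration (example values only):
+///
+/// ```ignore
+/// assert_eq!((-1i8).as_unsigned(), 255u8);
+/// assert_eq!(200u8.as_signed(), -56i8);
+/// ```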
+pub(crate) trait ScalarConversion: Sized { + type Unsigned; + type Signed; + fn as_unsigned(self) -> Self::Unsigned; + fn as_signed(self) -> Self::Signed; +} + +// Signed integer implementations +impl ScalarConversion for i8 { + type Unsigned = u8; + type Signed = i8; + + #[inline(always)] + fn as_unsigned(self) -> u8 { + self as u8 + } + + #[inline(always)] + fn as_signed(self) -> i8 { + self + } +} + +impl ScalarConversion for i16 { + type Unsigned = u16; + type Signed = i16; + + #[inline(always)] + fn as_unsigned(self) -> u16 { + self as u16 + } + + #[inline(always)] + fn as_signed(self) -> i16 { + self + } +} + +impl ScalarConversion for i32 { + type Unsigned = u32; + type Signed = i32; + + #[inline(always)] + fn as_unsigned(self) -> u32 { + self as u32 + } + + #[inline(always)] + fn as_signed(self) -> i32 { + self + } +} + +impl ScalarConversion for i64 { + type Unsigned = u64; + type Signed = i64; + + #[inline(always)] + fn as_unsigned(self) -> u64 { + self as u64 + } + + #[inline(always)] + fn as_signed(self) -> i64 { + self + } +} + +// Unsigned integer implementations +impl ScalarConversion for u8 { + type Unsigned = u8; + type Signed = i8; + + #[inline(always)] + fn as_unsigned(self) -> u8 { + self + } + + #[inline(always)] + fn as_signed(self) -> i8 { + self as i8 + } +} + +impl ScalarConversion for u16 { + type Unsigned = u16; + type Signed = i16; + + #[inline(always)] + fn as_unsigned(self) -> u16 { + self + } + + #[inline(always)] + fn as_signed(self) -> i16 { + self as i16 + } +} + +impl ScalarConversion for u32 { + type Unsigned = u32; + type Signed = i32; + + #[inline(always)] + fn as_unsigned(self) -> u32 { + self + } + + #[inline(always)] + fn as_signed(self) -> i32 { + self as i32 + } +} + +impl ScalarConversion for u64 { + type Unsigned = u64; + type Signed = i64; + + #[inline(always)] + fn as_unsigned(self) -> u64 { + self + } + + #[inline(always)] + fn as_signed(self) -> i64 { + self as i64 + } +} + +// Pointer type conversions are implemented via macro below +macro_rules! 
impl_scalar_conversion_for_ptr { + ($(($unsigned:ty, $signed:ty)),*) => { + $( + impl ScalarConversion for *const $unsigned { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { + self + } + + #[inline(always)] + fn as_signed(self) -> *const $signed { + self as *const $signed + } + } + + impl ScalarConversion for *const $signed { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { + self as *const $unsigned + } + + #[inline(always)] + fn as_signed(self) -> *const $signed { + self + } + } + + impl ScalarConversion for *mut $unsigned { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { + self + } + + #[inline(always)] + fn as_signed(self) -> *mut $signed { + self as *mut $signed + } + } + + impl ScalarConversion for *mut $signed { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { + self as *mut $unsigned + } + + #[inline(always)] + fn as_signed(self) -> *mut $signed { + self + } + } + )* + }; +} + +impl_scalar_conversion_for_ptr!((u8, i8), (u16, i16), (u32, i32), (u64, i64)); + +// ============================================================================ +// Public Module Exports +// ============================================================================ + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use sve::*; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use sve2::*; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use types::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs new file mode 100644 index 0000000000000..73ea1441edb32 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -0,0 +1,46098 @@ +// This code is automatically generated. DO NOT MODIFY. 
+// +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// ``` +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; +use crate::core_arch::arch::aarch64::*; + +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(pg.into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(pg.into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[inline] 
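+#[doc = "The `_n_` variants take a scalar `op2`; it is broadcast to all elements with `svdup_n_f64` before the vector form above is applied."]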
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Absolute 
difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) +} 
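+// A minimal caller sketch for the predicated forms above: `_m` keeps inactive
+// elements from `op1`, `_x` leaves them unspecified, and `_z` zeroes them.
+// `svptrue_b8` (an all-true predicate) is assumed to be defined elsewhere in
+// this file:
+//
+//     #[target_feature(enable = "sve")]
+//     fn abd_all_lanes(a: svuint8_t, b: svuint8_t) -> svuint8_t {
+//         let pg = svptrue_b8();
+//         svabd_u8_m(pg, a, b)
+//     }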
+#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(inactive, pg.into(), op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabs_f64_m(inactive, pg.into(), op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(inactive, pg, op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(inactive, pg.into(), op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(inactive, pg.into(), op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(inactive, pg.into(), op) } +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacge_f32(pg.into(), op1, op2).into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacge_f64(pg.into(), op1, op2).into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacgt_f32(pg.into(), op1, op2).into() } +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacgt_f64(pg.into(), op1, op2).into() } +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) +} +#[doc = 
"Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svadd_f32_m(pg.into(), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(pg.into(), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn 
svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] 
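+// Implementation note: every predicated arithmetic wrapper in this file
+// follows the same _m/_x/_z convention. `_m` (merging) calls the LLVM
+// merging intrinsic directly; `_x` ("don't care") reuses the `_m` body,
+// since any value is acceptable in inactive lanes; `_z` (zeroing) first
+// clears the inactive lanes of `op1` with `svsel_*(pg, op1, svdup_n_*(0))`
+// and then applies the merging operation. A hypothetical caller would see
+// the three variants as (sketch; inactive-lane contents in the comments):
+//
+//     let r_m = svadd_s32_m(pg, a, b); // inactive lanes keep a
+//     let r_x = svadd_s32_x(pg, a, b); // inactive lanes unspecified
+//     let r_z = svadd_s32_z(pg, a, b); // inactive lanes are zero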
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(pg.into(), initial, op) } +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(pg.into(), initial, op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(pg.into(), op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(pg.into(), op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")] + fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg.into(), op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")] + fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_u64(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")] + fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(pg, op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")] + fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(pg.into(), op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")] + fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_s32(pg.into(), op) } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")] + fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")] + fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_u16(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")] + fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_u32(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")] + fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { 
_svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.adrw.nxv2i64")] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nvx16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svand_b_z(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")] + 
fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svand_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svand_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] 
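+// Implementation note: the `_n_` forms take a scalar second operand and
+// simply splat it with `svdup_n_*` before forwarding to the vector form,
+// so the two calls below are equivalent (sketch):
+//
+//     let a = svand_n_s16_m(pg, op1, 0x00ff);
+//     let b = svand_s16_m(pg, op1, svdup_n_s16(0x00ff));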
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svand_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: 
svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svand_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise 
AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub 
fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")] + fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svandv_s8(pg, op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")] + fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svandv_s16(pg.into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")] + fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svandv_s32(pg.into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")] + fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svandv_s64(pg.into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] 
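+// Implementation note: unlike the `svaddv_*` reductions above, which widen
+// their result to 64 bits, the `svandv_*` reductions return a scalar of the
+// element type; the unsigned forms reinterpret through the signed intrinsic
+// via `as_signed`/`as_unsigned`. For example (sketch):
+//
+//     let folded: u8 = svandv_u8(pg, v); // bitwise AND of the active lanes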
+#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")] + fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")] + fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svasr_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")] + fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svasr_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = 
"Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")] + fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svasr_s64_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv16i8" + )] + fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv8i16" + )] + fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svasr_wide_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) 
-> svint16_t { + svasr_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv4i32" + )] + fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svasr_wide_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")] + fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svasrd_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::<IMM2>(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")] + fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svasrd_n_s16_m(pg.into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::<IMM2>(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")] + fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svasrd_n_s32_m(pg.into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::<IMM2>(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")] + fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svasrd_n_s64_m(pg.into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::<IMM2>(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nxv16i1")] + fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbic_b_z(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] + fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbic_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + 
svbic_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbic_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbic_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) 
-> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] +#[inline] +#[target_feature(enable 
= "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Break after first true condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] + fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_m(inactive, pg, op) } +} +#[doc = "Break after first true condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] + fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_z(pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] + fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_m(inactive, pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] + fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_z(pg, op) } +} +#[doc = "Propagate break to next partition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brkn))] +pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")] + fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkn_b_z(pg, op1, op2) } +} +#[doc = "Break after first true condition, propagating from previous partition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brkpa))] +pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1" + )] + fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpa_b_z(pg, op1, op2) } +} +#[doc = "Break before first true condition, propagating from previous partition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(brkpb))] +pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern 
"C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1" + )] + fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpb_b_z(pg, op1, op2) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] + fn _svcadd_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcadd_f32_m(pg.into(), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] + fn _svcadd_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcadd_f64_m(pg.into(), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")] + fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; + } + unsafe { _svclasta_f32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")] + fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t; + } + unsafe { _svclasta_f64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")] + fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclasta_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")] + fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclasta_s16(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")] + fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclasta_s32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")] + fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclasta_s64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc 
= ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4f32" + )] + fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclasta_n_f32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2f64" + )] + fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { _svclasta_n_f64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv16i8" + )] + fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclasta_n_s8(pg, fallback, data) } +} +#[doc = 
"Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv8i16" + )] + fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclasta_n_s16(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4i32" + )] + fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32; + } + unsafe { _svclasta_n_s32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2i64" + )] + fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64; + } + unsafe { _svclasta_n_s64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { + unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { + unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { + unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { + unsafe { svclasta_n_s64(pg, fallback.as_signed(), 
data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")] + fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; + } + unsafe { _svclastb_f32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")] + fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t; + } + unsafe { _svclastb_f64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")] + fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclastb_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")] + fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclastb_s16(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")] + fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclastb_s32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")] + fn 
_svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclastb_s64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv4f32" + )] + fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclastb_n_f32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv2f64" + )] + fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { _svclastb_n_f64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.clastb.n.nxv16i8" + )] + fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclastb_n_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv8i16" + )] + fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclastb_n_s16(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv4i32" + )] + fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32; + } + unsafe { _svclastb_n_s32(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv2i64" + )] + fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64; + } + unsafe { _svclastb_n_s64(pg.into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { + unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { + unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { + unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn 
svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { + unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] + fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcls_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcls_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] + fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcls_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcls_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcls_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] + fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcls_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcls_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcls_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] + fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcls_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcls_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcls_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svclz_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svclz_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub 
fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svclz_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svclz_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svclz_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svclz_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svclz_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svclz_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + 
svclz_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svclz_s64_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svclz_u64_m(op, pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svclz_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")]
+        fn _svcmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_f32_m(pg.into(), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
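+// NOTE (illustrative only, not part of the generated bindings): the rotation for the
+// svcmla_* family is passed as a const generic, so the static_assert! above rejects
+// anything other than 0, 90, 180 or 270 at compile time. A minimal usage sketch,
+// assuming only the svcmla_f32_x wrapper and the svbool_t/svfloat32_t types defined
+// in this module; the function name below is hypothetical:
+//
+//     #[target_feature(enable = "sve")]
+//     fn cmla_rot90_example(
+//         pg: svbool_t,
+//         acc: svfloat32_t,
+//         a: svfloat32_t,
+//         b: svfloat32_t,
+//     ) -> svfloat32_t {
+//         // Complex multiply-add of a and b into acc with a 90-degree rotation
+//         // (see Arm's svcmla documentation for the exact lane semantics).
+//         svcmla_f32_x::<90>(pg, acc, a, b)
+//     }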
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")]
+        fn _svcmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcmla_f64_m(pg.into(), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_f32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32"
+        )]
+        fn _svcmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")]
+        fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpeq_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpeq_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")]
+        fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpeq_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpeq_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpeq_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: 
svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpeq_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare equal to"] +#[doc 
= ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8" + )] + fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpeq_wide_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16" + )] + fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpeq_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32" + )] + fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpeq_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] + fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpge_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpge_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> 
svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")] + fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")] + fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, 
op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")] + fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")] + fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8" + )] + fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpge_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16" + )] + fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: 
svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpge_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32" + )] + fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpge_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8" + )] + fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpge_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16" + )] + fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpge_wide_u16(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32" + )] + fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpge_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpgt_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpgt_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare 
greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")] + fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")] + fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")] + fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")] + fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc 
= ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8" + )] + fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16" + )] + fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32" + )] + fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8" + )] + fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare 
greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16" + )] + fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32" + )] + fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpge_f32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpge_f64(pg, op2, op1) +} +#[doc = "Compare 
less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpge_s8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpge_s16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpge_s32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpge_s64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[inline] 
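+// Note: the svcmple_* wrappers are implemented by swapping the operands of the
+// corresponding svcmpge_* wrappers (a <= b iff b >= a), so they lower to the same
+// CMPGE/CMPHS/FCMGE instructions with reversed operands rather than binding
+// separate LLVM intrinsics.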
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpge_u8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpge_u16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpge_u32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpge_u64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8" + )] + fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmple_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16" + )] + fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmple_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32" + )] + fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmple_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8" + )] + fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmple_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] +#[inline] 
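+// Note: the unsigned wide comparisons bind LLVM intrinsics whose declarations here
+// use signed element types, so the svuint*_t operands are passed through as_signed();
+// this is a layout-preserving bitcast, and the unsigned comparison semantics come from
+// the intrinsic itself (llvm.aarch64.sve.cmpls.wide.* for less-than-or-equal).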
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16" + )] + fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmple_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32" + )] + fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmple_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8" + )] + fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmplt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16" + )] + fn 
_svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmplt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32" + )] + fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmplt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8" + )] + fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmplt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16" + )] + fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmplt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32" + )] + fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmplt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpne_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpne_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpne_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpne_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpne_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare 
not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8" + )] + fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpne_wide_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, 
op2: i64) -> svbool_t { + svcmpne_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16" + )] + fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpne_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpne_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32" + )] + fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpne_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpne_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")] + fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpuo_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpuo_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")] + fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } 
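+    // The generic svbool_t predicate is converted to the 64-bit-element predicate view
+    // (svbool2_t) expected by the nxv2f64 intrinsic via .into(), and the svbool2_t
+    // result is converted back to svbool_t the same way.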
+ unsafe { _svcmpuo_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpuo_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: 
svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_m(inactive: svuint32_t, 
pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")] + fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcnt_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcnt_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcnt_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")] + fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcnt_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcnt_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcnt_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")] + fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(cnt))] +pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcnt_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")] + fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnt_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcnt_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")] + fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnt_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcnt_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")] + fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnt_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] +#[inline] 
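+// Like the other "_x" (don't-care) variants, svcnt_s64_x may leave inactive lanes unspecified, so it reuses `op` as the merge source and forwards to the "_m" form.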
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcnt_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svcntb() -> u64 { + svcntb_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> u64 { + svcnth_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svcntw() -> u64 { + svcntw_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> u64 { + svcntd_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl, PATTERN = { svpattern::SV_ALL }))] +#[cfg_attr(test, assert_instr(cntb, PATTERN = { svpattern::SV_MUL4 }))] +pub fn svcntb_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntb_pat(PATTERN).as_unsigned() } +}
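+// svcnth_pat, svcntw_pat and svcntd_pat below mirror svcntb_pat: the const PATTERN selects how many elements are counted. SV_ALL counts every element, which is why the plain svcntb/svcnth/svcntw/svcntd wrappers above lower to a single rdvl/cnth/cntw/cntd.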
+#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth, PATTERN = { svpattern::SV_ALL }))] +pub fn svcnth_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth_pat(pattern: svpattern) -> i64; + } + unsafe { _svcnth_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw, PATTERN = { svpattern::SV_ALL }))] +pub fn svcntw_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")] + fn _svcntw_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntw_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd, PATTERN = { svpattern::SV_ALL }))] +pub fn svcntd_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntd_pat(PATTERN).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64; + } + unsafe { _svcntp_b8(pg, op).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64; + } + unsafe { _svcntp_b16(pg.into(), op.into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64; + } + unsafe { _svcntp_b32(pg.into(), op.into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64; + } + unsafe { _svcntp_b64(pg.into(), op.into()).as_unsigned() } +} +#[doc = "Shuffle 
active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4f32" + )] + fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2f64" + )] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svcompact_f64(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4i32" + )] + fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2i64" + )] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { + unsafe extern "C" { 
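+ // The mangled suffix names the tuple result type first (nxv8f32, i.e. two nxv4f32 parts) and the element vector type second.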
+ #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv8f32.nxv4f32" + )] + fn _svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t; + } + unsafe { _svcreate2_f32(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv4f64.nxv2f64" + )] + fn _svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t; + } + unsafe { _svcreate2_f64(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8" + )] + fn _svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t; + } + unsafe { _svcreate2_s8(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv16i16.nxv8i16" + )] + fn _svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t; + } + unsafe { _svcreate2_s16(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv8i32.nxv4i32" + )] + fn _svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t; + } + unsafe { _svcreate2_s32(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv4i64.nxv2i64" + )] + fn _svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t; + } + unsafe { _svcreate2_s64(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { + unsafe { svcreate2_s8(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { + unsafe { 
svcreate2_s16(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { + unsafe { svcreate2_s32(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { + unsafe { svcreate2_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv12f32.nxv4f32" + )] + fn _svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t; + } + unsafe { _svcreate3_f32(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv6f64.nxv2f64" + )] + fn _svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t; + } + unsafe { _svcreate3_f64(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv48i8.nxv16i8" + )] + fn _svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t; + } + unsafe { _svcreate3_s8(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv24i16.nxv8i16" + )] + fn _svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t; + } + unsafe { _svcreate3_s16(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv12i32.nxv4i32" + )] + fn _svcreate3_s32(x0: 
svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t; + } + unsafe { _svcreate3_s32(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv6i64.nxv2i64" + )] + fn _svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t; + } + unsafe { _svcreate3_s64(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { + unsafe { svcreate3_s8(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { + unsafe { svcreate3_s16(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { + unsafe { svcreate3_s32(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { + unsafe { svcreate3_s64(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, +) -> svfloat32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv16f32.nxv4f32" + )] + fn _svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, + ) -> svfloat32x4_t; + } + unsafe { _svcreate4_f32(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, +) -> svfloat64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv8f64.nxv2f64" + )] + fn _svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, + ) -> svfloat64x4_t; + } + unsafe { _svcreate4_f64(x0, x1, x2, x3) } 
+} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8" + )] + fn _svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t; + } + unsafe { _svcreate4_s8(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16" + )] + fn _svcreate4_s16( + x0: svint16_t, + x1: svint16_t, + x2: svint16_t, + x3: svint16_t, + ) -> svint16x4_t; + } + unsafe { _svcreate4_s16(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32" + )] + fn _svcreate4_s32( + x0: svint32_t, + x1: svint32_t, + x2: svint32_t, + x3: svint32_t, + ) -> svint32x4_t; + } + unsafe { _svcreate4_s32(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64" + )] + fn _svcreate4_s64( + x0: svint64_t, + x1: svint64_t, + x2: svint64_t, + x3: svint64_t, + ) -> svint64x4_t; + } + unsafe { _svcreate4_s64(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { + unsafe { + svcreate4_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u16( + x0: svuint16_t, + x1: svuint16_t, + x2: svuint16_t, + x3: svuint16_t, +) -> svuint16x4_t { + unsafe { + svcreate4_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] +#[inline] +#[target_feature(enable = "sve")] 
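+// As with the u8/u16 variants above, the remaining unsigned tuple constructors delegate to the signed intrinsics through bit-identical as_signed()/as_unsigned() casts.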
+pub fn svcreate4_u32( + x0: svuint32_t, + x1: svuint32_t, + x2: svuint32_t, + x3: svuint32_t, +) -> svuint32x4_t { + unsafe { + svcreate4_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u64( + x0: svuint64_t, + x1: svuint64_t, + x2: svuint64_t, + x3: svuint64_t, +) -> svuint64x4_t { + unsafe { + svcreate4_s64( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")] + fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvt_f32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")] + fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_f32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvt_f64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn 
svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = 
"Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")] + fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_s32_f32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svcvt_s32_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svcvt_s32_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")] + fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_s32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe { svcvt_s32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { + svcvt_s32_f64_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")] + fn 
_svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_s64_f32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe { svcvt_s64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { + svcvt_s64_f32_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")] + fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_s64_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svcvt_s64_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svcvt_s64_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")] + fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcvt_u32_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn 
svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcvt_u32_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")] + fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe { svcvt_u32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + svcvt_u32_f64_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")] + fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe { svcvt_u64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + svcvt_u64_f32_m(svdup_n_u64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")] + fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point 
convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcvt_u64_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcvt_u64_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")] + fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdiv_f32_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")] + fn 
_svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdiv_f64_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_s32_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")] + fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_s64_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")] + fn _svdiv_u32_m(pg: 
svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")] + fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: 
u64) -> svuint64_t { + svdiv_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")] + fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdivr_f32_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_m(pg: svbool_t, op1: 
svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")] + fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdivr_f64_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")] + fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")] + fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")] + fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")] + fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32" + )] + fn _svdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64" + )] + fn _svdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u32<const IMM_INDEX: i32>( + op1: svuint32_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv4i32" + )] + fn _svdot_lane_u32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { + _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +}
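+// A minimal usage sketch (comments only), assuming the intrinsics defined in
+// this module: the `_m`/`_x`/`_z` suffixes follow the ACLE predication
+// conventions (merge inactive lanes from `op1`, leave them unspecified, or
+// zero them), and the `svdot_lane_*` lane index is a const generic checked by
+// `static_assert_range!`. The operand values below are placeholders.
+//
+//     let pg = svptrue_b32();              // all lanes active
+//     let a = svdup_n_f32(6.0);
+//     let b = svdup_n_f32(3.0);
+//     let q_m = svdiv_f32_m(pg, a, b);     // inactive lanes keep `a`
+//     let q_z = svdiv_f32_z(pg, a, b);     // inactive lanes become 0.0
+//     let acc = svdot_lane_s32::<0>(svdup_n_s32(0), svdup_n_s8(1), svdup_n_s8(2));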
+#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u64( + op1: svuint64_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv2i64" + )] + fn _svdot_lane_u64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { + _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")] + fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_s32(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t { + svdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")] + fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_s64(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t { + svdot_s64(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")] + fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t { + svdot_u32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")] + fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { + svdot_u64(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { + svtbl_f32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + svtbl_f64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { + svtbl_s8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { + svtbl_s16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { + svtbl_s32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { + svtbl_s64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { + svtbl_u8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { + svtbl_u16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { + svtbl_u32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + svtbl_u64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b8(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")] + fn _svdup_n_b8(op: bool) -> svbool_t; + } + unsafe { _svdup_n_b8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b16(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")] + fn _svdup_n_b16(op: bool) -> svbool8_t; + } + unsafe { _svdup_n_b16(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b32(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")] + fn _svdup_n_b32(op: bool) -> svbool4_t; + } + unsafe { _svdup_n_b32(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b64(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")] + fn _svdup_n_b64(op: bool) -> svbool2_t; + } + unsafe { _svdup_n_b64(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn 
_svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a 
scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")] + fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")] + fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")] + fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8_m(inactive, pg, op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")] + fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")] + fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> 
svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")] + fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { + unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t { + unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> 
svuint32_t { + unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t { + unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32" + )] + fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t; + } + unsafe { _svdupq_lane_f32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64" + )] + fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t; + } + unsafe { _svdupq_lane_f64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.dupq.lane.nxv16i8" + )] + fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t; + } + unsafe { _svdupq_lane_s8(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16" + )] + fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t; + } + unsafe { _svdupq_lane_s16(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32" + )] + fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t; + } + unsafe { _svdupq_lane_s32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64" + )] + fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t; + } + unsafe { _svdupq_lane_s64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { + unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { + unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { + unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b16( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, +) -> svbool_t { + let op1 = svdupq_n_s16( + x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16, + ); + svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { + let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32); + svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { + let op1 = svdupq_n_s64(x0 as i64, x1 as i64); + svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b8( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, + x8: bool, + x9: bool, + x10: bool, + x11: bool, + x12: bool, + x13: bool, + x14: bool, + x15: bool, +) -> svbool_t { + let op1 = svdupq_n_s8( + x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8, + x9 as i8, x10 as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8, + ); + svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + )] + fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; + } + unsafe { + let op = _svdupq_n_f32( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3]), + 0, + ); + svdupq_lane_f32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + )] + fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; + } + unsafe { + let op = _svdupq_n_s32( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3]), + 0, + ); + svdupq_lane_s32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u32(x0: u32, x1: 
u32, x2: u32, x3: u32) -> svuint32_t { + unsafe { + svdupq_n_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + )] + fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; + } + unsafe { + let op = _svdupq_n_f64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_f64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + )] + fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; + } + unsafe { + let op = _svdupq_n_s64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_s64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { + unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s16( + x0: i16, + x1: i16, + x2: i16, + x3: i16, + x4: i16, + x5: i16, + x6: i16, + x7: i16, +) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + )] + fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; + } + unsafe { + let op = _svdupq_n_s16( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]), + 0, + ); + svdupq_lane_s16(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u16( + x0: u16, + x1: u16, + x2: u16, + x3: u16, + x4: u16, + x5: u16, + x6: u16, + x7: u16, +) -> svuint16_t { + unsafe { + svdupq_n_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s8( + x0: i8, + x1: i8, + x2: i8, + x3: i8, + x4: i8, + x5: i8, + x6: i8, + x7: i8, + x8: i8, + x9: i8, + x10: i8, + x11: i8, + x12: i8, + x13: i8, + x14: i8, + x15: i8, +) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.experimental.vector.insert.nxv16i8.v16i8" + )] + fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; + } + unsafe { + let op = _svdupq_n_s8( + simd_reinterpret(()), + crate::mem::transmute([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, + ]), + 0, + ); + svdupq_lane_s8(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u8( + x0: u8, + x1: u8, + x2: u8, + x3: u8, + x4: u8, + x5: u8, + x6: u8, + x7: u8, + x8: u8, + x9: u8, + x10: u8, + x11: u8, + x12: u8, + x13: u8, + x14: u8, + x15: u8, +) -> svuint8_t { + unsafe { + svdupq_n_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + x8.as_signed(), + x9.as_signed(), + x10.as_signed(), + x11.as_signed(), + x12.as_signed(), + x13.as_signed(), + x14.as_signed(), + x15.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nvx16i1")] + fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _sveor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv16i8")] + fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveor_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_z(pg: 
svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")] + fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveor_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")] + fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveor_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")] + fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveor_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveor_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, svsel_u32(pg, op1, 
svdup_n_u32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")] + fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _sveorv_s8(pg, op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + 
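+        // Wider-element reductions use a narrower LLVM predicate type (svbool8_t here,
+        // i.e. nxv8i1), so the full-width svbool_t governing predicate is converted
+        // with `.into()` before the call below.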
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")]
+        fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _sveorv_s16(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")]
+        fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _sveorv_s32(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")]
+        fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _sveorv_s64(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Floating-point exponential accelerator"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32"
+        )]
+        fn _svexpa_f32(op: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svexpa_f32(op.as_signed()) }
+}
+#[doc = "Floating-point exponential accelerator"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
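+// Like svexpa_f32 above, this binding passes the unsigned input through as_signed()
+// only to match the signed vector type in the LLVM intrinsic signature; FEXPA consumes
+// the raw bits of each element, so no value conversion is implied.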
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64"
+        )]
+        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svexpa_f64(op.as_signed()) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
+        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svext_f32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
+        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svext_f64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
+        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svext_s8(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
+        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svext_s16(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
+        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svext_s32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
+        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svext_s64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
+        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svextb_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")] + fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_s32_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")] + fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_s32_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")] + fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")] + fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")] + fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")] + fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_u16_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = 
"Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv4i32")] + fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")] + fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")] + fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")] + fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")] + fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_x(pg: svbool_t, op: 
svuint64_t) -> svuint64_t { + svextw_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_f32(tuple: svfloat32x2_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv8f32" + )] + fn _svget2_f32(tuple: svfloat32x2_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svget2_f32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_f64(tuple: svfloat64x2_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64" + )] + fn _svget2_f64(tuple: svfloat64x2_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svget2_f64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s8(tuple: svint8x2_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8" + )] + fn _svget2_s8(tuple: svint8x2_t, imm_index: i32) -> svint8_t; + } + unsafe { _svget2_s8(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s16(tuple: svint16x2_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv16i16" + )] + fn _svget2_s16(tuple: svint16x2_t, imm_index: i32) -> svint16_t; + } + unsafe { _svget2_s16(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s32(tuple: svint32x2_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv8i32" + )] + fn _svget2_s32(tuple: svint32x2_t, imm_index: i32) -> svint32_t; + } + unsafe { _svget2_s32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn 
svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv4i64"
+        )]
+        fn _svget2_s64(tuple: svint64x2_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget2_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv12f32"
+        )]
+        fn _svget3_f32(tuple: svfloat32x3_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svget3_f32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv6f64"
+        )]
+        fn _svget3_f64(tuple: svfloat64x3_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svget3_f64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv48i8"
+        )]
+        fn _svget3_s8(tuple: svint8x3_t, imm_index: i32) -> svint8_t;
+    }
+    unsafe { _svget3_s8(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv24i16"
+        )]
+        fn _svget3_s16(tuple: svint16x3_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svget3_s16(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv12i32"
+        )]
+        fn _svget3_s32(tuple: svint32x3_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svget3_s32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv6i64"
+        )]
+        fn _svget3_s64(tuple: svint64x3_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget3_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv16f32"
+        )]
+        fn _svget4_f32(tuple: svfloat32x4_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svget4_f32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64"
+        )]
+        fn _svget4_f64(tuple: svfloat64x4_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svget4_f64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8"
+        )]
+        fn _svget4_s8(tuple: svint8x4_t, imm_index: i32) -> svint8_t;
+    }
+    unsafe { _svget4_s8(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16"
+        )]
+        fn _svget4_s16(tuple: svint16x4_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svget4_s16(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32"
+        )]
+        fn _svget4_s32(tuple: svint32x4_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svget4_s32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64"
+        )]
+        fn _svget4_s64(tuple: svint64x4_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget4_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"]
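+// Illustrative usage (a sketch, not part of the generated bindings above): the
+// `svgetN_*` getters take the element position as a const generic, so the index
+// is validated at compile time by `static_assert_range!`. The helper name
+// `middle_of_three` is hypothetical and assumes an SVE-enabled target.
+//
+// #[target_feature(enable = "sve")]
+// fn middle_of_three(t: svint32x3_t) -> svint32_t {
+//     // The index must lie in 0..=2 for a three-vector tuple.
+//     svget3_s32::<1>(t)
+// }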
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s8(base: i8, step: i8) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")]
+        fn _svindex_s8(base: i8, step: i8) -> svint8_t;
+    }
+    unsafe { _svindex_s8(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s16(base: i16, step: i16) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")]
+        fn _svindex_s16(base: i16, step: i16) -> svint16_t;
+    }
+    unsafe { _svindex_s16(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s32(base: i32, step: i32) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")]
+        fn _svindex_s32(base: i32, step: i32) -> svint32_t;
+    }
+    unsafe { _svindex_s32(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s64(base: i64, step: i64) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")]
+        fn _svindex_s64(base: i64, step: i64) -> svint64_t;
+    }
+    unsafe { _svindex_s64(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { + unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { + unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { + unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { + unsafe { svindex_s64(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")] + fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t; + } + unsafe { _svinsr_n_f32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")] + fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t; + } + unsafe { _svinsr_n_f64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")] + fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t; + } + unsafe { _svinsr_n_s8(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")] + fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t; + } + 
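+// Illustrative usage (a sketch, not part of the generated bindings; the helper
+// name `shifted_series` is hypothetical): `svindex` builds the series
+// base, base + step, base + 2 * step, ... across all lanes, and `svinsr`
+// shifts every element up by one lane before writing the scalar into lane 0.
+//
+// #[target_feature(enable = "sve")]
+// fn shifted_series() -> svint32_t {
+//     let series = svindex_s32(0, 4); // lane i holds 4 * i: 0, 4, 8, ...
+//     svinsr_n_s32(series, -1) // yields -1, 0, 4, 8, ...
+// }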
unsafe { _svinsr_n_s16(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")] + fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t; + } + unsafe { _svinsr_n_s32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")] + fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t; + } + unsafe { _svinsr_n_s64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlasta_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1_f64(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
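+// Illustrative usage (a sketch, not part of the generated bindings; the helper
+// name `last_active_f32` is hypothetical): a predicated load followed by
+// `svlastb`, which returns the element from the last active lane of `pg`.
+// The caller must uphold the pointer-offset rules listed in the Safety
+// sections above for every active lane.
+//
+// #[target_feature(enable = "sve")]
+// unsafe fn last_active_f32(pg: svbool_t, data: &[f32]) -> f32 {
+//     // Safety: `pg` must have no active lane at or beyond `data.len()`.
+//     let v = unsafe { svld1_f32(pg, data.as_ptr()) };
+//     svlastb_f32(pg, v)
+// }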
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32" + )] + fn _svld1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32index_f32(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn 
svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32" + )] + fn _svld1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32index_s32(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64" + )] + fn _svld1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64" + )] + fn _svld1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32" + )] + fn _svld1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32index_f32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32" + )] + fn _svld1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32index_s32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svld1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32" + )] + fn _svld1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32offset_f32(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32" + )] + fn _svld1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32offset_s32(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64" + )] + fn _svld1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64" + )] + fn _svld1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
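+// Illustrative usage (a sketch, not part of the generated bindings; the helper
+// name `gather_every_other_i64` is hypothetical): the `_offset` gather forms
+// take byte offsets, whereas the `_index` forms above scale the index by the
+// element size before adding it to `base`.
+//
+// #[target_feature(enable = "sve")]
+// unsafe fn gather_every_other_i64(pg: svbool_t, base: *const i64) -> svint64_t {
+//     // A 16-byte stride selects every other i64; every offset must stay in
+//     // bounds for the active lanes of `pg`.
+//     let byte_offsets = svindex_s64(0, 16);
+//     unsafe { svld1_gather_s64offset_s64(pg, base, byte_offsets) }
+// }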
+#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32" + )] + fn _svld1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32" + )] + fn _svld1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svld1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_f64( + 
pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svld1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svld1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svld1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, 
so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svld1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")] + fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1ro_f32(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")] + fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1ro_f64(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")] + fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1ro_s8(pg, base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
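+// Notes (illustrative, not normative):
+// * For the `svld1_vnum_*` loads above, `vnum` is scaled by the runtime vector
+//   length, so consecutive `vnum` values address consecutive whole vectors.
+//   A minimal sketch, assuming `svptrue_b32` is provided elsewhere in this
+//   module:
+//
+//       unsafe fn load_two_vectors(data: *const f32) -> (svfloat32_t, svfloat32_t) {
+//           let pg = svptrue_b32();
+//           // Equivalent to svld1_f32(pg, data) and
+//           // svld1_f32(pg, data.add(svcntw() as usize)).
+//           (svld1_vnum_f32(pg, data, 0), svld1_vnum_f32(pg, data, 1))
+//       }
+//
+// * The `svld1ro_*` functions load a single 256-bit block and replicate it
+//   across the whole vector, which is why they also require the `f64mm`
+//   target feature in addition to `sve`.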
+#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")] + fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1ro_s16(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")] + fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1ro_s32(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")] + fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1ro_s64(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1ro_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1ro_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of 
data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1ro_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1ro_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")] + fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1rq_f32(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")] + fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1rq_f64(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")] + fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1rq_s8(pg, base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")] + fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1rq_s16(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")] + fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1rq_s32(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")] + fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1rq_s64(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub 
unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1rq_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1rq_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1rq_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1rq_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svld1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
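+// Note (illustrative, not normative): the unsigned-offset (`u32offset`/
+// `u64offset`) and unsigned-result variants of these sign-extending gathers
+// are thin wrappers: offsets are reinterpreted with `.as_signed()` and results
+// with `.as_unsigned()`, while the narrow values returned by the LLVM gathers
+// (`nxv4i8`, `nxv2i16`, ...) are widened via `simd_cast` in the signed-offset
+// implementations. A minimal sketch, assuming `svindex_u64` and `svptrue_b64`
+// are provided elsewhere in this module:
+//
+//     unsafe fn gather_every_other_byte_sext(p: *const i8) -> svint64_t {
+//         // Byte offsets 0, 2, 4, ... for the "offset" (unscaled) form.
+//         let offsets = svindex_u64(0, 2);
+//         svld1sb_gather_u64offset_s64(svptrue_b64(), p, offsets)
+//     }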
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_s64( + pg: svbool_t, + 
bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svld1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each 
lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svld1sb_s16(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svld1sb_s32(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svld1sh_s32(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svld1sb_s64(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svld1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svld1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svld1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u32(pg: svbool_t, 
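+    // The `_uNN`-result contiguous loads in this block reuse the signed loads:
+    // the data is still sign-extended from the narrow source type, and only
+    // the result vector is reinterpreted via `.as_unsigned()`.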
base: *const i8) -> svuint32_t { + svld1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svld1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svld1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svld1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svld1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_s32index_s32(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32index_s32( + pg.into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
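+// As with the other `u64index` gathers above, the function below reinterprets
+// the unsigned indices as signed and defers to the signed-index gather, then
+// reinterprets the result back, i.e. a sketch of the shared pattern:
+//     svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()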
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
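+// Note on the `[_uNNbase]_index_` wrappers: `index` counts elements rather
+// than bytes, so it is scaled by the element size before being forwarded to
+// the matching `_offset_` function (`<< 1` for 16-bit data, `<< 2` for 32-bit).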
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"] 
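+// The zero-extending (`ld1ub`/`ld1uh`/`ld1uw`) gathers mirror the sign-extending
+// ones: the signed-result variants, like the one below, simply reinterpret the
+// unsigned-result load with `.as_signed()`, since widening already happened there.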
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast::( + _svld1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_u32offset_u32(pg.into(), base.as_signed(), 
offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + 
offset: i64, + ) -> nxv2i8; + } + simd_cast::( + _svld1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1ub_gather_u32base_offset_s32(pg, 
bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: 
svuint64_t) -> svint64_t { + svld1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast::(_svld1ub_s16(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast::(_svld1ub_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast::(_svld1uh_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast::(_svld1ub_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast::(_svld1uh_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast::(_svld1uw_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svld1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svld1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svld1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svld1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svld1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svld1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"] +#[doc = 
""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svld1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * 
This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
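+// The remaining `_index` wrappers either reuse the signed-index implementations directly
+// (64-bit indices need no extension) or, for the `_u32base`/`_u64base` forms, convert the
+// element index into a byte offset before calling the `_offset` form: `unchecked_shl(1)`
+// for 16-bit elements and `unchecked_shl(2)` for 32-bit elements.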
+#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv8f32.nxv4i1" + )] + fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t; + } + _svld2_f32(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv4f64.nxv2i1" + )] + fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t; + } + _svld2_f64(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv32i8.nxv16i1" + )] + fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t; + } + _svld2_s8(pg, base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv16i16.nxv8i1" + )] + fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t; + } + _svld2_s16(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv8i32.nxv4i1" + )] + fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t; + } + _svld2_s32(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv4i64.nxv2i1" + )] + fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t; + } + _svld2_s64(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { + svld2_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t { + svld2_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { + svld2_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { + svld2_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"] 
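+// Sketch of what the `_vnum_` helpers below compute (commentary only, based on
+// the definitions that follow): for 32-bit elements the effective pointer is
+// `base.offset(svcntw() as isize * vnum as isize)`, so `vnum` steps the base
+// forward by whole vector registers, and `svcntw()` (the number of 32-bit
+// lanes per vector) is only known at run time.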
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t { + svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t { + svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t { + svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t { + svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t { + svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t { + svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t { + svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t { + svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t { + svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> 
svuint64x2_t { + svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv12f32.nxv4i1" + )] + fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t; + } + _svld3_f32(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv6f64.nxv2i1" + )] + fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t; + } + _svld3_f64(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv48i8.nxv16i1" + )] + fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t; + } + _svld3_s8(pg, base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv24i16.nxv8i1" + )] + fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t; + } + 
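+    // `pg` is the full 16-lane `svbool_t`; `.into()` reinterprets it as the
+    // 8-lane predicate type (`svbool8_t`) required by the `nxv8i1` intrinsic
+    // declared above. (Explanatory comment added for review, not generated.)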
_svld3_s16(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv12i32.nxv4i1" + )] + fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t; + } + _svld3_s32(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv6i64.nxv2i1" + )] + fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t; + } + _svld3_s64(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { + svld3_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { + svld3_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { + svld3_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { + svld3_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t { + svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t { + svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t { + svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t { + svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t { + svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t { + svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t { + svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t { + svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t { + svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t { + svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv16f32.nxv4i1" + )] + fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t; + } + _svld4_f32(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv8f64.nxv2i1" + )] + fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t; + } + _svld4_f64(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv64i8.nxv16i1" + )] + fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t; + } + _svld4_s8(pg, base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv32i16.nxv8i1" + )] + fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t; + } + _svld4_s16(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv16i32.nxv4i1" + )] + fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t; + } + _svld4_s32(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv8i64.nxv2i1" + )] + fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t; + } + _svld4_s64(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { + svld4_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { + svld4_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { + svld4_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { + svld4_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t { + svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t { + svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t { + svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t { + svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t { + svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t { + svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t { + svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t { + svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t { + svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t { + svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")] + fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldff1_f32(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")] + fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldff1_f64(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")] + fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldff1_s8(pg, base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")] + fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldff1_s16(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")] + fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldff1_s32(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")] + fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldff1_s64(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldff1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldff1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldff1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldff1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32" + )] + fn _svldff1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32index_f32(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
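// A minimal usage sketch for the contiguous first-faulting loads above: clear the
// FFR, load, then read the FFR back to learn which lanes actually loaded. This is
// illustrative only and assumes the usual ACLE helpers (`svsetffr`, `svrdffr`,
// `svptrue_b32`) are provided elsewhere in this module.
#[target_feature(enable = "sve")]
unsafe fn ldff1_sketch(ptr: *const f32) -> svfloat32_t {
    let pg = svptrue_b32();
    svsetffr(); // make every lane of the first-fault register active
    let data = svldff1_f32(pg, ptr);
    // Lanes cleared in the FFR did not load; only lanes active in `_loaded`
    // hold defined values in `data`.
    let _loaded: svbool_t = svrdffr();
    data
}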
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32" + )] + fn _svldff1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32index_s32(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64" + )] + fn _svldff1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64" + )] + fn _svldff1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32" + )] + fn _svldff1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32index_f32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32" + )] + fn _svldff1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32index_s32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldff1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
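// A minimal usage sketch for the index-form gathers above: `indices` holds element
// indices (the instruction scales them by the element size), so lane i reads
// `base[indices[i]]`. Assumes `svptrue_b64`, `svsetffr` and `svindex_s64` are
// provided elsewhere in this module, as in the ACLE.
#[target_feature(enable = "sve")]
unsafe fn gather_every_other_f64(base: *const f64) -> svfloat64_t {
    let pg = svptrue_b64();
    svsetffr();
    let indices = svindex_s64(0, 2); // element indices 0, 2, 4, ... per lane
    svldff1_gather_s64index_f64(pg, base, indices)
}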
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32" + )] + fn _svldff1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32offset_f32(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32" + )] + fn _svldff1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32offset_s32(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64" + )] + fn _svldff1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64" + )] + fn _svldff1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32" + )] + fn _svldff1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32" + )] + fn _svldff1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldff1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
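// A minimal usage sketch for the offset-form gathers above: unlike the index form,
// `offsets` are raw byte offsets, so reading `base[i]` for a 64-bit element needs
// an offset of `8 * i`. Assumes `svptrue_b64`, `svsetffr` and `svindex_s64` exist
// elsewhere in this module, as in the ACLE.
#[target_feature(enable = "sve")]
unsafe fn gather_with_byte_offsets(base: *const f64) -> svfloat64_t {
    let pg = svptrue_b64();
    svsetffr();
    let byte_offsets = svindex_s64(0, 8); // same elements as indices 0, 1, 2, ...
    svldff1_gather_s64offset_f64(pg, base, byte_offsets)
}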
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
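// A minimal usage sketch for the base-vector gathers above: each lane of `bases`
// is an absolute 64-bit address with no pointer provenance, so the addresses are
// normally derived from an exposed pointer (here via an `as usize` cast, matching
// the provenance note in the docs). Assumes `svptrue_b64`, `svsetffr` and
// `svindex_u64` exist elsewhere in this module, as in the ACLE.
#[target_feature(enable = "sve")]
unsafe fn gather_from_address_vector(p: *const u64) -> svuint64_t {
    let pg = svptrue_b64();
    svsetffr();
    // Per-lane addresses p, p+8, p+16, ... built from the exposed address of `p`.
    let bases = svindex_u64(p as usize as u64, 8);
    svldff1_gather_u64base_u64(pg, bases)
}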
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldff1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldff1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldff1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldff1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svldff1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svldff1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svldff1sb_s16(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svldff1sb_s32(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svldff1sh_s32(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svldff1sb_s64(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svldff1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svldff1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldff1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldff1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldff1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldff1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldff1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldff1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_s32index_s32(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32index_s32( + pg.into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svldff1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldff1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast::( + _svldff1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldff1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldff1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svldff1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldff1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
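+// ----------------------------------------------------------------------------
+// Illustrative note, not part of the generated bindings: the `_s32offset_`
+// wrappers bind the `llvm.aarch64.sve.ldff1.gather.sxtw.*` intrinsics (each
+// 32-bit offset is sign-extended before being added to the scalar base),
+// while the `_u32offset_` wrappers bind the `...gather.uxtw.*` forms
+// (zero-extension). Unsigned offset vectors are passed through `as_signed()`
+// only because the LLVM declarations use signed element types; the lane bit
+// patterns are unchanged. Conceptually, per active lane:
+//
+//     // sxtw: addr = base + sign_extend_to_64(offsets[i])
+//     // uxtw: addr = base + zero_extend_to_64(offsets[i])
+// ----------------------------------------------------------------------------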
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
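+// ----------------------------------------------------------------------------
+// Illustrative note, not part of the generated bindings: with 64-bit offsets
+// there is no sxtw/uxtw distinction, so the unsigned-offset wrappers above
+// simply reinterpret their offsets and reuse the signed-offset
+// implementations (and hence the same `llvm.aarch64.sve.ldff1.gather.*`
+// intrinsic), e.g.:
+//
+//     svldff1ub_gather_u64offset_u64(pg, base, offsets)
+//         == svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
+// ----------------------------------------------------------------------------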
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast::( + _svldff1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast::( + _svldff1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
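+// ----------------------------------------------------------------------------
+// Illustrative usage sketch, not part of the generated bindings: the
+// `_u32base_offset_` forms implement the ACLE "vector base, scalar offset"
+// addressing mode, where each lane of `bases` holds a 32-bit address and the
+// single `offset` is added to every lane. Assuming an all-true predicate
+// helper such as `svptrue_b32` is also exposed by this module (an assumption;
+// it is not in this hunk), and given hypothetical `addrs: svuint32_t` and
+// `offset: i64`, one byte per active lane could be gathered roughly like:
+//
+//     let pg = svptrue_b32();
+//     let loaded: svuint32_t = svldff1ub_gather_u32base_offset_u32(pg, addrs, offset);
+// ----------------------------------------------------------------------------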
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast::( + _svldff1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast::( + _svldff1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast::( + _svldff1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
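+// ----------------------------------------------------------------------------
+// Illustrative note, not part of the generated bindings: the suffix-less
+// `_u32base_`/`_u64base_` gathers above are zero-offset conveniences over the
+// `_offset_` forms, e.g.:
+//
+//     svldff1ub_gather_u64base_u64(pg, bases)
+//         == svldff1ub_gather_u64base_offset_u64(pg, bases, 0)
+// ----------------------------------------------------------------------------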
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
+ fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+ }
+ simd_cast(_svldff1ub_s16(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
+ fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+ }
+ simd_cast(_svldff1ub_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
+ fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+ }
+ simd_cast(_svldff1uh_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
+ fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+ }
+ simd_cast(_svldff1ub_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")]
+ fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+ }
+ simd_cast(_svldff1uh_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")]
+ fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+ }
+ simd_cast(_svldff1uw_s64(pg.into(), base.as_signed()).as_unsigned())
+}
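A minimal usage sketch (illustrative only, not part of this patch), showing how the first-faulting loads above are typically paired with the FFR. It assumes `svsetffr`, `svrdffr` and `svptrue_b16` are exposed by this module with their usual ACLE behaviour; `load_prefix_u8_as_u16` is a hypothetical helper, not an intrinsic.

// Speculatively load up to one vector of bytes, zero-extended to 16 bits.
// The FFR records which lanes were actually loaded before any fault, so the
// caller can consume only the valid prefix instead of faulting on a bad tail.
#[target_feature(enable = "sve")]
unsafe fn load_prefix_u8_as_u16(ptr: *const u8) -> (svint16_t, svbool_t) {
    svsetffr(); // reset the first-fault register: every lane starts out valid
    let pg = svptrue_b16(); // predicate with all 16-bit lanes active
    let data = svldff1ub_s16(pg, ptr); // first-faulting, zero-extending load
    let valid = svrdffr(); // lanes that were loaded before the first fault
    (data, valid)
}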
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
+ svldff1ub_s16(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
+ svldff1ub_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
+ svldff1uh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldff1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldff1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldff1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
+ svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
+ svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
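A minimal sketch (illustrative only, not part of this patch) of what the `_vnum_` forms above compute: `vnum` selects a whole block of source elements, one result vector's worth per step, which is exactly the `base.offset(svcntd() as isize * vnum as isize)` arithmetic written out in the functions above. `load_third_block` is a hypothetical helper.

// Load the block of bytes starting at base + 3 * svcntd(), zero-extended to
// 64-bit lanes; identical to calling svldff1ub_s64 with the pointer offset by hand.
#[target_feature(enable = "sve")]
unsafe fn load_third_block(pg: svbool_t, base: *const u8) -> svint64_t {
    svldff1ub_vnum_s64(pg, base, 3)
}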
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
+ svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
+ svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
+ svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32index_u32(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svint32_t,
+) -> svuint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(
+ target_arch = "aarch64",
+ link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16"
+ )]
+ fn _svldff1uh_gather_s32index_u32(
+ pg: svbool4_t,
+ base: *const i16,
+ indices: svint32_t,
+ ) -> nxv4i16;
+ }
+ simd_cast(
+ _svldff1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(),
+ )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64index_s64(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svint64_t,
+) -> svint64_t {
+ svldff1uh_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64index_s64(
+ pg: svbool_t,
+ base: *const u32,
+ indices: svint64_t,
+) -> svint64_t {
+ svldff1uw_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64index_u64(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svint64_t,
+) -> svuint64_t {
+ unsafe extern "C" {
+ #[cfg_attr(
+ target_arch = "aarch64",
+ link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
+ )]
+ fn _svldff1uh_gather_s64index_u64(
+ pg: svbool2_t,
+ base: *const i16,
+ indices: svint64_t,
+ ) -> nxv2i16;
+ }
+ simd_cast(
+ _svldff1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+ )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64index_u64(
+ pg: svbool_t,
+ base: *const u32,
+ indices: svint64_t,
+) -> svuint64_t {
+ unsafe extern "C" {
+ #[cfg_attr(
+ target_arch = "aarch64",
+ link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
+ )]
+ fn _svldff1uw_gather_s64index_u64(
+ pg: svbool2_t,
+ base: *const i32,
+ indices: svint64_t,
+ ) -> nxv2i32;
+ }
+ simd_cast(
+ _svldff1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+ )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32index_s32(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svuint32_t,
+) -> svint32_t {
+ svldff1uh_gather_u32index_u32(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32index_u32(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svuint32_t,
+) -> svuint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(
+ target_arch = "aarch64",
+ link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16"
+ )]
+ fn _svldff1uh_gather_u32index_u32(
+ pg: svbool4_t,
+ base: *const i16,
+ indices: svint32_t,
+ ) -> nxv4i16;
+ }
+ simd_cast(
+ _svldff1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed())
+ .as_unsigned(),
+ )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64index_s64(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svuint64_t,
+) -> svint64_t {
+ svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
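A minimal sketch (illustrative only, not part of this patch) of the index-form gathers above: `indices` holds element indices, which the intrinsic scales by the element size, in contrast to the `_offset_` forms, which take raw byte offsets. `lookup_u16` is a hypothetical helper, and `svptrue_b32` is assumed to be exposed by this module with its usual ACLE behaviour.

// Gather table[indices[i]] for every active lane, zero-extending each 16-bit
// entry into a 32-bit result lane.
#[target_feature(enable = "sve")]
unsafe fn lookup_u16(table: *const u16, indices: svuint32_t) -> svuint32_t {
    let pg = svptrue_b32(); // all 32-bit lanes active
    svldff1uh_gather_u32index_u32(pg, table, indices)
}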
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64index_s64(
+ pg: svbool_t,
+ base: *const u32,
+ indices: svuint64_t,
+) -> svint64_t {
+ svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64index_u64(
+ pg: svbool_t,
+ base: *const u16,
+ indices: svuint64_t,
+) -> svuint64_t {
+ svldff1uh_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64index_u64(
+ pg: svbool_t,
+ base: *const u32,
+ indices: svuint64_t,
+) -> svuint64_t {
+ svldff1uw_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")] + fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnf1_f32(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")] + fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnf1_f64(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")] + fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnf1_s8(pg, base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")] + fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnf1_s16(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")] + fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnf1_s32(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")] + fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnf1_s64(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnf1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnf1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnf1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnf1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svldnf1sb_s16(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svldnf1sb_s32(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svldnf1sh_s32(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svldnf1sb_s64(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svldnf1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svldnf1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldnf1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldnf1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldnf1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldnf1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldnf1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldnf1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svldnf1ub_s16(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svldnf1ub_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svldnf1uh_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svldnf1ub_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svldnf1uh_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svldnf1uw_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldnf1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldnf1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldnf1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldnf1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldnf1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldnf1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")] + fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnt1_f32(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")] + fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnt1_f64(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")] + fn _svldnt1_s8(pg: svbool_t, 
base: *const i8) -> svint8_t; + } + _svldnt1_s8(pg, base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")] + fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnt1_s16(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")] + fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnt1_s32(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")] + fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnt1_s64(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"] 
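+#[doc = ""]
+#[doc = "## Example"]
+#[doc = ""]
+#[doc = "An illustrative sketch only (not part of the Arm specification); it assumes an"]
+#[doc = "SVE-enabled target and that the all-true predicate helper `svptrue_b8` is"]
+#[doc = "provided elsewhere in this module:"]
+#[doc = ""]
+#[doc = "```ignore"]
+#[doc = "// Stream one full vector of bytes without polluting the cache."]
+#[doc = "unsafe fn load_chunk(src: *const u8) -> svuint8_t {"]
+#[doc = "    svldnt1_u8(svptrue_b8(), src)"]
+#[doc = "}"]
+#[doc = "```"]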
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnt1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnt1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnt1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> 
svuint64_t { + svldnt1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_f32(_op: svfloat32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_f64(_op: svfloat64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_s8(_op: svint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_s16(_op: svint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_s32(_op: svint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_s64(_op: svint64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_u8(_op: svuint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_u16(_op: svuint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_u32(_op: svuint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_u64(_op: svuint64_t) -> u64 { + svcntd() +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")] + fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, svsel_s8(pg, op1, 
svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")] + fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsl_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")] + fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsl_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] 
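+#[doc = ""]
+#[doc = "## Example"]
+#[doc = ""]
+#[doc = "An illustrative sketch of the merging (`_m`) form; it assumes an SVE-enabled"]
+#[doc = "target and that `svptrue_b32` is provided elsewhere in this module:"]
+#[doc = ""]
+#[doc = "```ignore"]
+#[doc = "// Shift every active lane left by 3 (multiply by 8); inactive lanes keep op1."]
+#[doc = "let r = svlsl_n_s32_m(svptrue_b32(), svdup_n_s32(5), 3); // active lanes: 40"]
+#[doc = "```"]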
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")] + fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsl_s64_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] +#[inline] 
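+// Note on the zeroing (`_z`) forms: they are built by selecting `op1` for active
+// lanes and zero for inactive lanes, then reusing the merging form. Illustrative
+// sketch only, assuming an SVE-enabled target and `svptrue_b64` from this module:
+//
+//     let pg = svptrue_b64();
+//     let r = svlsl_s64_z(pg, svdup_n_s64(1), svdup_n_u64(4)); // active lanes: 16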
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_m(pg, op1, 
svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8" + )] + fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = 
"Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16" + )] + fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsl_wide_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_z(pg: svbool_t, op1: 
svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32" + )] + fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsl_wide_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")] + fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, 
op2: svint8_t) -> svint8_t; + } + unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")] + fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_x(pg: 
svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")] + fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] 
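+// Note: as with the narrower element widths above, the unsigned shift is
+// implemented on top of the signed LLVM vector types: the operands are
+// bit-reinterpreted with `as_signed`/`as_unsigned` around the call, and the
+// generic `svbool_t` predicate is converted into the width-specific
+// `svbool2_t` form expected by the nxv2i64 intrinsic via `.into()`.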
+#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")] + fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8" + )] + fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_x(pg: 
svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16" + )] + fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsr_wide_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32" + )] + fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsr_wide_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")] + fn _svmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmad_f32_m(pg.into(), op1, op2, 
op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")] + fn _svmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmad_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + 
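+    // `_x` ("don't care") form: the value of inactive lanes is unspecified,
+    // so reusing the merging `_m` form (which leaves them as `op1`) is a
+    // valid implementation choice.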
svmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")] + fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmad_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")] + fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmad_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")] + fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { 
_svmad_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")] + fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmad_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> 
svuint8_t { + svmad_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, 
op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")] + fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmax_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")] + fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmax_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")] + fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_s8_m(pg, op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
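+// Zeroing form: inactive lanes of `op1` are cleared with `svsel_s8` before
+// the merging operation, so inactive result lanes come back as zero.
+//
+// A minimal usage sketch (hypothetical caller name; assumes the caller is
+// itself compiled with the `sve` target feature):
+//
+//     #[target_feature(enable = "sve")]
+//     fn relu_i8(pg: svbool_t, v: svint8_t) -> svint8_t {
+//         // Active lanes: max(v, 0); inactive lanes: 0.
+//         svmax_n_s8_z(pg, v, 0)
+//     }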
+#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")] + fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_s16_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")] + fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_s32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")] + fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_s64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")] + fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")] + fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")] + fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")] + fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")] + fn 
_svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnm_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")] + fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnm_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] +#[inline] 
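+// Note: the `_n_` variants take a scalar second operand; they splat it with
+// the corresponding `svdup_n_*` call and defer to the vector form.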
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32" + )] + fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxnmv_f32(pg.into(), op) } +} +#[doc = "Maximum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64" + )] + fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxnmv_f64(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")] + fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxv_f32(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")] + fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxv_f64(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")] + 
fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_s8(pg, op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")] + fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_s16(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")] + fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_s32(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")] + fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_s64(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")] + fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")] + fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_u16(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")] + fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_u32(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn 
svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")] + fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_u64(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")] + fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmin_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")] + fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmin_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> 
svfloat64_t { + svmin_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")] + fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_s8_m(pg, op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")] + fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_s16_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")] + fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_s32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_m(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv2i64")] + fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_s64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")] + fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")] + fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: 
u16) -> svuint16_t { + svmin_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")] + fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), 
op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")] + fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")] + fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnm_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] +#[inline] 
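+// Note: `svminnm`/`svmaxnm` map to FMINNM/FMAXNM, which follow the IEEE
+// 754-2008 minNum/maxNum behaviour (a quiet NaN in one operand yields the
+// other, numeric, operand), whereas `svmin`/`svmax` propagate NaNs.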
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")] + fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnm_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv4f32" + )] + fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminnmv_f32(pg.into(), op) } +} +#[doc = "Minimum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv2f64" + )] + fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminnmv_f64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")] + fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminv_f32(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")] + fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminv_f64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")] + fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_s8(pg, op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"] 
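+// Note: the LLVM reduction intrinsics expect a predicate type matching the
+// element width (`svbool8_t` for 16-bit, `svbool4_t` for 32-bit, `svbool2_t`
+// for 64-bit lanes), so the incoming `svbool_t` is converted with `.into()`.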
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")] + fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_s16(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")] + fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_s32(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")] + fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_s64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")] + fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")] + fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_u16(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")] + fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_u32(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")] + fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_u64(pg.into(), 
op.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")] + fn _svmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmla_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")] + fn _svmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmla_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")] + fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmla_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")] + fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmla_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_z(pg, 
op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")] + fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmla_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")] + fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmla_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, 
op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub 
fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_x(pg, 
op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+pub fn svmla_lane_f32<const IMM_INDEX: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32"
+        )]
+        fn _svmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            IMM_INDEX: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+pub fn svmla_lane_f64<const IMM_INDEX: i32>(
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64"
+        )]
+        fn _svmla_lane_f64(
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            IMM_INDEX: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")]
+        fn _svmls_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmls_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"]
+#[inline]
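+// Note on the `_x` ("don't care") forms: inactive lanes may take any value, so
+// forwarding to the merging `_m` implementation (as done below) is a valid, if
+// conservative, lowering. Illustrative use, assuming `pg`, `acc`, `a` and `b`
+// are in scope: `let r = svmls_f32_x(pg, acc, a, b);` computes `acc - a * b`
+// in each active lane.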
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")] + fn _svmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmls_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")] + fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmls_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] +#[inline] 
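+// For element widths above 8 bits the LLVM intrinsics take a narrower predicate
+// view (svbool8_t, svbool4_t or svbool2_t), so the governing svbool_t is
+// converted with `.into()` before the call, as in the body below.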
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")] + fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmls_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")] + fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmls_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")] + fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmls_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] +#[inline] 
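+// The `_z` forms zero the inactive lanes: `op1` is first selected against a zero
+// vector with `svsel`, and the merging `_m` form is then applied, so lanes where
+// `pg` is false come out as 0 instead of keeping `op1`.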
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc 
= "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] 
+pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
+pub fn svmls_lane_f32<const IMM_INDEX: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32"
+        )]
+        fn _svmls_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            IMM_INDEX: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
+pub fn svmls_lane_f64<const IMM_INDEX: i32>(
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64"
+        )]
+        fn _svmls_lane_f64(
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            IMM_INDEX: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f32mm")]
+#[cfg_attr(test, assert_instr(fmmla))]
+pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")]
+        fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmmla_f32(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(fmmla))]
+pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")]
+        fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmmla_f64(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(smmla))]
+pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")]
+        fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svmmla_s32(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(ummla))]
+pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ummla.nxv4i32")] + fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Move"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + svand_b_z(pg, op, op) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")] + fn _svmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmsb_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fmsb))] +pub fn svmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")] + fn _svmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmsb_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")] + fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmsb_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = 
"Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")] + fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmsb_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")] + fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmsb_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")] + fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmsb_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: 
svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand 
first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn 
svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: 
svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name 
= "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_m(pg: 
svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")] + fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_s8_m(pg, op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")] + fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_s16_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_m(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { + svmulh_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")] + fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_s32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")] + fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_s64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.umulh.nxv16i8")] + fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")] + fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")] + fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] 
+pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")] + fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")] + fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmulx_f32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn 
svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")] + fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmulx_f64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Bitwise NAND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(nand))] +pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")] + fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnand_b_z(pg, op1, op2) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")] + fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svneg_f32_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")] + fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svneg_f64_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")] + fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svneg_s8_m(inactive, pg, op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")] + fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svneg_s16_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")] + fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svneg_s32_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_x(pg: svbool_t, 
op: svint32_t) -> svint32_t { + svneg_s32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")] + fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svneg_s64_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")] + fn _svnmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmad_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_x(pg, op1, op2, 
svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")] + fn _svnmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmad_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fnmad))] +pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")] + fn _svnmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmla_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")] + fn _svnmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> 
svfloat64_t; + } + unsafe { _svnmla_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")] + fn _svnmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmls_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fnmls))] +pub fn svnmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")] + fn _svnmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmls_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")] + fn _svnmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmsb_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, 
op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")] + fn _svnmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmsb_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Bitwise NOR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(nor))] +pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")] + fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + sveor_b_z(pg, op, pg) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")] + fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svnot_s8_m(inactive, pg, op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")] + fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svnot_s16_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")] + fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svnot_s32_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(not))] +pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")] + fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svnot_s64_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(op, pg, op) +} +#[doc = 
"Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Bitwise inclusive OR, inverting second argument"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orn))] +pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nvx16i1")] + fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorn_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nvx16i1")] + fn _svorr_b_z(pg: 
svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorr_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t 
{ + svorr_u64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")] + fn _svorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svorv_s8(pg, op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")] + fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svorv_s16(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")] + fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svorv_s32(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")] + fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svorv_s64(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u8(pg: svbool_t, 
op: svuint8_t) -> u8 { + unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Set all predicate elements to false"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pfalse))] +pub fn svpfalse_b() -> svbool_t { + svdupq_n_b8( + false, false, false, false, false, false, false, false, false, false, false, false, false, + false, false, false, + ) +} +#[doc = "Set the first active predicate element to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pfirst))] +pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")] + fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpfirst_b(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")] + fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpnext_b8(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")] + fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t; + } + unsafe { _svpnext_b16(pg.into(), op.into()).into() } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b32(pg: 
svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")] + fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t; + } + unsafe { _svpnext_b32(pg.into(), op.into()).into() } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")] + fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t; + } + unsafe { _svpnext_b64(pg.into(), op.into()).into() } +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb<const OP: svprfop, T>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")] + fn _svprfb(pg: svbool_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfb(pg, base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh<const OP: svprfop, T>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")] + fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfh(pg.into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw<const OP: svprfop, T>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")] + fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfw(pg.into(), base as *const crate::ffi::c_void, OP) +}
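+// --------------------------------------------------------------------------
+// Usage sketch (illustrative only, not part of the generated bindings): the
+// contiguous prefetch wrappers above take the prefetch operation as a const
+// generic and a governing predicate that decides which per-element addresses
+// must satisfy the `pointer::offset` rules. Assuming the unstable
+// `stdarch_aarch64_sve` feature inside an sve-enabled function:
+//
+//     let data = [0u8; 256];
+//     // Hint that the start of `data` will be read soon and should be kept
+//     // in L1 (SV_PLDL1KEEP); all lanes are active under svptrue_b8().
+//     unsafe { svprfb::<{ svprfop::SV_PLDL1KEEP }, u8>(svptrue_b8(), data.as_ptr()) };
+// --------------------------------------------------------------------------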
+#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd<const OP: svprfop, T>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")] + fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfd(pg.into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s32offset<const OP: svprfop, T>( + pg: svbool_t, + base: *const T, + offsets: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32" + )] + fn _svprfb_gather_s32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_s32offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s32index<const OP: svprfop, T>( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32" + )] + fn _svprfh_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s32index<const OP: svprfop, T>( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32" + )] + fn _svprfw_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation 
for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32" + )] + fn _svprfd_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s64offset( + pg: svbool_t, + base: *const T, + offsets: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64" + )] + fn _svprfb_gather_s64offset( + pg: svbool2_t, + base: *const crate::ffi::c_void, + offsets: svint64_t, + op: svprfop, + ); + } + _svprfb_gather_s64offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64" + )] + fn _svprfh_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfh_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64" + )] + fn _svprfw_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfw_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64" + )] + fn _svprfd_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfd_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u32offset( + pg: svbool_t, + base: *const T, + offsets: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32" + )] + fn _svprfb_gather_u32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_u32offset( + pg.into(), + base as *const crate::ffi::c_void, + offsets.as_signed(), + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32" + )] + fn _svprfh_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32" + )] + fn _svprfw_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32" + )] + fn _svprfd_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u64offset( + pg: svbool_t, + base: *const T, + offsets: svuint64_t, +) { + svprfb_gather_s64offset::(pg, base, offsets.as_signed()) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfh_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfw_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfd_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfb_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfb_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane 
before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base_offset( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop); + } + _svprfb_gather_u32base_offset(pg.into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn 
_svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base_offset( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop); + } + _svprfb_gather_u64base_offset(pg.into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) { + svprfb::<OP, T>(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) { + svprfh::<OP, T>(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) { + svprfw::<OP, T>(pg, base.offset(svcntw() as isize * vnum as isize)) +}
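+// --------------------------------------------------------------------------
+// Usage sketch (illustrative only, not part of the generated bindings): the
+// `_vnum` wrappers above scale `vnum` by the run-time vector length, so
+// `vnum = 1` advances `base` by exactly one SVE vector's worth of elements
+// (svcntb/svcnth/svcntw/svcntd elements respectively). Assuming the unstable
+// `stdarch_aarch64_sve` feature and a buffer large enough for two vectors:
+//
+//     let buf = [0u16; 256];
+//     // Prefetch the block one whole vector of halfwords past the start,
+//     // i.e. the same address as buf.as_ptr().offset(svcnth() as isize).
+//     unsafe { svprfh_vnum::<{ svprfop::SV_PLDL1KEEP }, u16>(svptrue_b16(), buf.as_ptr(), 1) };
+// --------------------------------------------------------------------------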
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfd::(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Test whether any active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.any.nxv16i1" + )] + fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_any(pg, op) } +} +#[doc = "Test whether first active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.first.nxv16i1" + )] + fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_first(pg, op) } +} +#[doc = "Test whether last active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.last.nxv16i1" + )] + fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_last(pg, op) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b8() -> svbool_t { + svptrue_pat_b8::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b16() -> svbool_t { + svptrue_pat_b16::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b32() -> svbool_t { + svptrue_pat_b32::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b64() -> svbool_t { + svptrue_pat_b64::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] 
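+// The PATTERN const parameter chooses how many leading predicate elements are
+// set to true: `SV_ALL` activates every element, while fixed-length patterns
+// such as `SV_VL4` activate only the leading elements (when they fit in the
+// current vector length). The non-`_pat` wrappers above fix PATTERN to SV_ALL.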
+#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { _svptrue_pat_b16(PATTERN).into() } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { _svptrue_pat_b32(PATTERN).into() } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { _svptrue_pat_b64(PATTERN).into() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8" + )] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16" + )] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32" + )] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64" + )] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8" + )] + fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16" + )] + fn _svqadd_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32" + )] + fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64" + )] + fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s32(op: i32) -> i32 { + svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s32(op: i32) -> i32 { + svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s32(op: i32) -> i32 { + svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s32(op: i32) -> i32 { + svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s64(op: i64) -> i64 { + svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s64(op: i64) -> i64 { + svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s64(op: i64) -> i64 { + svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s64(op: i64) -> i64 { + svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u32(op: u32) -> u32 { + svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u32(op: u32) -> u32 { + svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u32(op: u32) -> u32 { + svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u32(op: u32) -> u32 { + svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u64(op: u64) -> u64 { + svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u64(op: u64) -> u64 { + svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u64(op: u64) -> u64 { + svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u64(op: u64) -> u64 { + svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")] + fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")] + fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")] + fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = 
"Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")] + fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")] + fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")] + fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")] + fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")] + fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")] + fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")] + fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")] + fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")] + fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")] + fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")] + fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")] + fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")] + fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")] + fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")] + fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")] + fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")] + fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")] + fn _svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")] + fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_s16(op: svint16_t) -> svint16_t { + svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_s32(op: svint32_t) -> svint32_t { + 
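+    // Vector form over 32-bit lanes: delegate to the `_pat_` variant with the
+    // SV_ALL pattern, so each lane is decremented by the full word element
+    // count (scaled by IMM_FACTOR) with signed saturation.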
svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_s64(op: svint64_t) -> svint64_t { + svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_u16(op: svuint16_t) -> svuint16_t { + svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_u32(op: svuint32_t) -> svuint32_t { + svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_u64(op: svuint64_t) -> svuint64_t { + svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_s32_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_s32_b16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_s32_b32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_s32_b64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_s64_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_s64_b16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_s64_b32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_s64_b64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")] + fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_s16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")] + fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_s32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")] + fn _svqdecp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_s64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")] + fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_u16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")] + fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_u32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "C" { + 
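+        // The LLVM declaration is expressed over the signed vector type; the
+        // wrapper converts with `as_signed`/`as_unsigned` around the call.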
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")] + fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_u64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s32(op: i32) -> i32 { + svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s32(op: i32) -> i32 { + svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s32(op: i32) -> i32 { + svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s32(op: i32) -> i32 { + svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s64(op: i64) -> i64 { + svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s64(op: i64) -> i64 { + svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s64(op: i64) -> i64 { + svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s64(op: i64) -> i64 { + svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u32(op: u32) -> u32 { + svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u32(op: u32) -> u32 { + svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u32(op: u32) -> u32 { + svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u32(op: u32) -> u32 { + svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u64(op: u64) -> u64 { + svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u64(op: u64) -> u64 { + svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u64(op: u64) -> u64 { + svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u64(op: u64) -> u64 { + svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + 
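+    // IMM_FACTOR is an immediate multiplier in [1, 16]: the byte element count
+    // selected by PATTERN is multiplied by it before the saturating increment.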
unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")]
+        fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")]
+        fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")]
+        fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")]
+        fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")]
+        fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")]
+        fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")]
+        fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")]
+        fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n32")]
+        fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")]
+        fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")]
+        fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")]
+        fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")]
+        fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")]
+        fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")]
+        fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")]
+        fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
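+// Explanatory sketch of the `_pat_n_` forms above: the result is roughly
+// `op + IMM_FACTOR * n`, where `n` is the number of elements of the named
+// width selected by `PATTERN` for the current vector length, saturated to the
+// range of the result type (signed saturation for `sqinc*`, unsigned for
+// `uqinc*`); see the linked Arm documentation for the exact semantics. The
+// unsigned wrappers reuse the signed-typed LLVM declarations and convert at
+// the boundary with `as_signed()`/`as_unsigned()`.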
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")] + fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")] + fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")] + fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")] + fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")] + fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword 
elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")] + fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_s16(op: svint16_t) -> svint16_t { + svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_s32(op: svint32_t) -> svint32_t { + svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_s64(op: svint64_t) -> svint64_t { + svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_u16(op: svuint16_t) -> svuint16_t { + svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_u32(op: svuint32_t) -> svuint32_t { + svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_u64(op: svuint64_t) -> svuint64_t { + svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqincp.n32.nxv16i1" + )] + fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_s32_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1" + )] + fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_s32_b16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1" + )] + fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_s32_b32(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1" + )] + fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_s32_b64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1" + )] + fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_s64_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1" + )] + fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_s64_b16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1" + )] + fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_s64_b32(op, pg.into()) } +} +#[doc = "Saturating 
increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1" + )] + fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_s64_b64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1" + )] + fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1" + )] + fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1" + )] + fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1" + )] + fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1" + )] + fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1" + )] + fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1" + )] + fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1" + )] + fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")] + fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_s16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")] + fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_s32(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")] + fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_s64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")] + fn _svqincp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_u16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")] + fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_u32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")] + fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_u64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8" + )] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16" + )] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32" + )] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64" + )] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8" + )] + fn _svqsub_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16" + )] + fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc 
= "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32" + )] + fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64" + )] + fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")] + fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svrbit_s8_m(inactive, pg, op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")] + fn _svrbit_s16_m(inactive: svint16_t, 
pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrbit_s16_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")] + fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrbit_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")] + fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrbit_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn 
svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrbit_u64_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrbit_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Read FFR, returning predicate of successfully loaded elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdffr))]
+pub fn svrdffr() -> svbool_t {
+    svrdffr_z(svptrue_b8())
+}
+#[doc = "Read FFR, returning predicate of successfully loaded elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdffr))]
+pub fn svrdffr_z(pg: svbool_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")]
+        fn _svrdffr_z(pg: svbool_t) -> svbool_t;
+    }
+    unsafe { _svrdffr_z(pg) }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpe))]
+pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32"
+        )]
+        fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrecpe_f32(op) }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpe))]
+pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64"
+        )]
+        fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrecpe_f64(op) }
+}
+#[doc = "Reciprocal step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecps))]
+pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecps.x.nxv4f32"
+        )]
+        fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrecps_f32(op1, op2) }
+}
+#[doc = "Reciprocal step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecps))]
+pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecps.x.nxv2f64"
+        )]
+        fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) ->
svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, pg.into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, pg.into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] 
+#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b8(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.rev.nxv16i1")] + fn _svrev_b8(op: svbool_t) -> svbool_t; + } + unsafe { _svrev_b8(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b16(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")] + fn _svrev_b16(op: svbool8_t) -> svbool8_t; + } + unsafe { _svrev_b16(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b32(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + fn _svrev_b32(op: svbool4_t) -> svbool4_t; + } + unsafe { _svrev_b32(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b64(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + fn _svrev_b64(op: svbool2_t) -> svbool2_t; + } + unsafe { _svrev_b64(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrev_f32(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrev_f64(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s8(op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + fn _svrev_s8(op: svint8_t) -> svint8_t; + } + unsafe { _svrev_s8(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s16(op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + fn _svrev_s16(op: svint16_t) -> svint16_t; + } + unsafe { _svrev_s16(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s32(op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + fn _svrev_s32(op: svint32_t) -> svint32_t; + } + unsafe { _svrev_s32(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s64(op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + fn _svrev_s64(op: svint64_t) -> svint64_t; + } + unsafe { _svrev_s64(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u8(op: svuint8_t) -> svuint8_t { + unsafe { svrev_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u16(op: svuint16_t) -> svuint16_t { + unsafe { svrev_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u32(op: svuint32_t) -> svuint32_t { + unsafe { svrev_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u64(op: svuint64_t) -> svuint64_t { + unsafe { svrev_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")] + fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrevb_s16_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> 
svint16_t { + svrevb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")] + fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevb_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")] + fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevb_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrevb_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")] + fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevh_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(op, pg, op) 
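+    // The _x ("don't care") forms leave inactive lanes unspecified, so reusing
+    // `op` as the inactive operand of the merging (_m) implementation is valid.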
+} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")] + fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevh_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(revh))] +pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")] + fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevw_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")] + fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinta_f32_m(inactive, 
pg.into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")] + fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinta_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")] + fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinti_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = 
"Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")] + fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinti_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")] + fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintm_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")] + fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintm_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn 
svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")] + fn _svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintn_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")] + fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintn_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")] + fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintp_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")] + fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintp_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")] + fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintx_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_z(pg: svbool_t, op: 
svfloat32_t) -> svfloat32_t { + svrintx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")] + fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintx_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")] + fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintz_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")] + fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintz_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] 
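+// Note: FRINTZ truncates toward zero regardless of the current FPCR rounding
+// mode, whereas FRINTI/FRINTX round according to the current mode.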
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" + )] + fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrts_f64(op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")] + fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svscale_f32_m(pg.into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fscale))] +pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")] + fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svscale_f64_m(pg.into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select::(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select::(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select::(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe { simd_select::(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe { simd_select::(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe { simd_select::(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg: 
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe { simd_select(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe { simd_select::<svbool4_t, svfloat32_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    unsafe { simd_select::<svbool2_t, svfloat64_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe { simd_select::<svbool_t, svint8_t>(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe { simd_select::<svbool8_t, svint16_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe { simd_select::<svbool4_t, svint32_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    unsafe { simd_select::<svbool2_t, svint64_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { simd_select::<svbool_t, svuint8_t>(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { simd_select::<svbool8_t, svuint16_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { simd_select::<svbool4_t, svuint32_t>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { simd_select::<svbool2_t, svuint64_t>(pg.into(), op1, op2) }
+}
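+// ----------------------------------------------------------------------------
+// All public intrinsics take the untyped predicate `svbool_t`; the underlying
+// operations want an element-width-specific predicate, hence the `pg.into()`
+// conversions above and in the rest of this file:
+//   8-bit elements  -> svbool_t  (nxv16i1, used directly)
+//   16-bit elements -> svbool8_t
+//   32-bit elements -> svbool4_t
+//   64-bit elements -> svbool2_t
+// Minimal sketch (illustrative only) of the conversion written out explicitly:
+//
+//     let pg32: svbool4_t = pg.into(); // predicate reinterpreted for 32-bit lanes
+//     let r = svsel_f32(pg, a, b);     // performs the same conversion internally
+// ----------------------------------------------------------------------------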
"llvm.aarch64.sve.tuple.set.nxv16i16.nxv8i16" + )] + fn _svset2_s16(tuple: svint16x2_t, imm_index: i32, x: svint16_t) -> svint16x2_t; + } + unsafe { _svset2_s16(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s32(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32" + )] + fn _svset2_s32(tuple: svint32x2_t, imm_index: i32, x: svint32_t) -> svint32x2_t; + } + unsafe { _svset2_s32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s64(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv4i64.nxv2i64" + )] + fn _svset2_s64(tuple: svint64x2_t, imm_index: i32, x: svint64_t) -> svint64x2_t; + } + unsafe { _svset2_s64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u8(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s8::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u16(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s16::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u32(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s32::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u64(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s64::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_f32(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32" + 
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32"
+        )]
+        fn _svset3_f32(tuple: svfloat32x3_t, imm_index: i32, x: svfloat32_t) -> svfloat32x3_t;
+    }
+    unsafe { _svset3_f32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv6f64.nxv2f64"
+        )]
+        fn _svset3_f64(tuple: svfloat64x3_t, imm_index: i32, x: svfloat64_t) -> svfloat64x3_t;
+    }
+    unsafe { _svset3_f64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8"
+        )]
+        fn _svset3_s8(tuple: svint8x3_t, imm_index: i32, x: svint8_t) -> svint8x3_t;
+    }
+    unsafe { _svset3_s8(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16"
+        )]
+        fn _svset3_s16(tuple: svint16x3_t, imm_index: i32, x: svint16_t) -> svint16x3_t;
+    }
+    unsafe { _svset3_s16(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32"
+        )]
+        fn _svset3_s32(tuple: svint32x3_t, imm_index: i32, x: svint32_t) -> svint32x3_t;
+    }
+    unsafe { _svset3_s32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64"
+        )]
+        fn _svset3_s64(tuple: svint64x3_t, imm_index: i32, x: svint64_t) -> svint64x3_t;
+    }
+    unsafe { _svset3_s64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv16f32.nxv4f32"
+        )]
+        fn _svset4_f32(tuple: svfloat32x4_t, imm_index: i32, x: svfloat32_t) -> svfloat32x4_t;
+    }
+    unsafe { _svset4_f32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv8f64.nxv2f64"
+        )]
+        fn _svset4_f64(tuple: svfloat64x4_t, imm_index: i32, x: svfloat64_t) -> svfloat64x4_t;
+    }
+    unsafe { _svset4_f64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv64i8.nxv16i8"
+        )]
+        fn _svset4_s8(tuple: svint8x4_t, imm_index: i32, x: svint8_t) -> svint8x4_t;
+    }
+    unsafe { _svset4_s8(tuple, IMM_INDEX, x) }
+}
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv32i16.nxv8i16" + )] + fn _svset4_s16(tuple: svint16x4_t, imm_index: i32, x: svint16_t) -> svint16x4_t; + } + unsafe { _svset4_s16(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s32(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv16i32.nxv4i32" + )] + fn _svset4_s32(tuple: svint32x4_t, imm_index: i32, x: svint32_t) -> svint32x4_t; + } + unsafe { _svset4_s32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s64(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8i64.nxv2i64" + )] + fn _svset4_s64(tuple: svint64x4_t, imm_index: i32, x: svint64_t) -> svint64x4_t; + } + unsafe { _svset4_s64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u8(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s8::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u16(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s16::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u32(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s32::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u64(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s64::(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Initialize the first-fault register to all-true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(setffr))] +pub fn svsetffr() { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")] + fn _svsetffr(); + } + unsafe { 
_svsetffr() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")] + fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsplice_f32(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")] + fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsplice_f64(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")] + fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsplice_s8(pg, op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")] + fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsplice_s16(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")] + fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsplice_s32(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")] + fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> 
svint64_t; + } + unsafe { _svsplice_s64(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")] + fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsqrt_f32_m(inactive, pg.into(), op) } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")] + fn 
_svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsqrt_f64_m(inactive, pg.into(), op) } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32" + )] + fn _svst1_scatter_s32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_f32(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32" + )] + fn _svst1_scatter_s32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_s32(data, pg.into(), base, indices) +} 
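+// ----------------------------------------------------------------------------
+// Scatter-store addressing sketch (illustrative only). The `index` forms scale
+// each element index by the element size, the `offset` forms (further below)
+// take raw byte offsets, and the `_u32base_` forms treat each lane of `bases`
+// as an address (see their Safety notes). Assuming `dst`, `indices`, `offsets`
+// and `data` are valid for every active lane of `pg`:
+//
+//     // dst[indices[i]] = data[i] for every active lane i (index scaled by 4 bytes)
+//     svst1_scatter_s32index_f32(pg, dst, indices, data);
+//     // byte-addressed equivalent: store to (dst as *mut u8).offset(offsets[i])
+//     svst1_scatter_s32offset_f32(pg, dst, offsets, data);
+// ----------------------------------------------------------------------------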
+#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64" + )] + fn _svst1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_f64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64" + )] + fn _svst1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_s64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), 
indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32" + )] + fn _svst1_scatter_u32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_f32(data, pg.into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32" + )] + fn _svst1_scatter_u32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_s32(data, pg.into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: 
svfloat64_t, +) { + svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32" + )] + fn _svst1_scatter_s32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_f32(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32" + )] + fn _svst1_scatter_s32offset_s32( + data: svint32_t, + pg: 
svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_s32(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64" + )] + fn _svst1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_f64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64" + )] + fn _svst1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_s64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_u64( + pg: 
svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32" + )] + fn _svst1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32" + )] + fn _svst1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svst1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + 
svst1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svst1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_u64(pg: 
svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset) +} 
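For orientation, a usage sketch follows (illustrative only, not part of this patch). It assumes an SVE-enabled target and that the usual ACLE constructors `svptrue_b64`, `svindex_s64`, and `svdup_n_f64` are bound elsewhere in this module; only `svst1_scatter_s64index_f64` and `svcntd` are taken from the bindings shown here.

    // Hypothetical helper, for illustration only.
    #[target_feature(enable = "sve")]
    unsafe fn scatter_every_other_f64(dst: &mut [f64]) {
        // The last scattered element is dst[2 * (svcntd() - 1)], so this bound suffices.
        assert!(dst.len() >= 2 * svcntd() as usize);
        let pg = svptrue_b64();       // all-true predicate (assumed binding)
        let idx = svindex_s64(0, 2);  // element indices 0, 2, 4, ... (assumed binding)
        let val = svdup_n_f64(1.0);   // splat 1.0 across the vector (assumed binding)
        // The `_index` form scales each index by the element size (8 bytes here),
        // so this writes 1.0 to dst[0], dst[2], dst[4], ... for every active lane.
        svst1_scatter_s64index_f64(pg, dst.as_mut_ptr(), idx, val);
    }

Note the convention visible in the wrappers above: the `_offset` wrappers take byte offsets and pass them through unchanged, while the `[u32/u64]base_index` wrappers take element indices and scale them to bytes (`index.unchecked_shl(2)` for 32-bit elements, `unchecked_shl(3)` for 64-bit) before delegating to the corresponding `_offset` form.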
+#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svst1_f64(pg, base.offset(svcntd() as isize * vnum 
as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { 
+ svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")] + fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8); + } + _svst1b_s16(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")] + fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8); + } + _svst1b_s32(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")] + fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16); + } + _svst1h_s32(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")] + fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8); + } + _svst1b_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")] + fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16); + } + _svst1h_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1w))] +pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")] + fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32); + } + _svst1w_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { + svst1b_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { + svst1b_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { + svst1h_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { + svst1b_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { + svst1h_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { + svst1w_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8" + )] + fn _svst1b_scatter_s32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16" + )] + fn _svst1h_scatter_s32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svint32_t, + data: svuint32_t, +) { + svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8" + )] + fn _svst1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svst1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16" + )] + fn _svst1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svst1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32" + )] + fn _svst1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svst1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn 
svst1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8" + )] + fn _svst1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16" + )] + fn _svst1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svst1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svst1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svst1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svst1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svst1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` 
lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) { + svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) { + svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) { + svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) { + svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) { + svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) { + svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) { + svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) { + svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) { + svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) { + svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) { + svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) { + svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16" + )] + fn _svst1h_scatter_s32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_s32index_s32(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16" + )] + fn _svst1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svst1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32" + )] + fn _svst1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svst1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16" + )] + fn _svst1h_scatter_u32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_u32index_s32(simd_cast(data), pg.into(), base, indices.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints 
must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")] + fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + pg.into(), + base, + ) +} 
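
// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the patch). The definitions above close
// out the truncating scatter stores and introduce the structured st2 stores;
// the snippet below shows how svst2_f32 might be called from user code to
// write two vectors in interleaved [a0, b0, a1, b1, ...] form. It assumes
// svptrue_b32, svdup_n_f32, svcreate2_f32 and svcntw are exposed by this
// module with their usual ACLE signatures; treat those names as assumptions
// rather than facts about this diff.
// ---------------------------------------------------------------------------
// #[target_feature(enable = "sve")]
// unsafe fn interleave_store(out: *mut f32) {
//     let pg = svptrue_b32();   // assumed ACLE helper: all lanes active
//     let a = svdup_n_f32(1.0); // assumed ACLE helper: splat a scalar
//     let b = svdup_n_f32(2.0);
//     // svst2_f32 writes one (a, b) pair per active lane starting at `out`,
//     // so the destination must provide room for at least 2 * svcntw() f32s.
//     svst2_f32(pg, out, svcreate2_f32(a, b));
// }
// ---------------------------------------------------------------------------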
+#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")] + fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + pg.into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")] + fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")] + fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + pg.into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")] + fn 
_svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + pg.into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")] + fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + pg.into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { + svst2_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { + svst2_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { + svst2_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { + svst2_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) { + svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) { + svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) { + svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) { + svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) { + svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) { + svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) { + svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) { + svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: 
svuint32x2_t) { + svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) { + svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")] + fn _svst3_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst3_f32( + svget3_f32::<0>(data), + svget3_f32::<1>(data), + svget3_f32::<2>(data), + pg.into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")] + fn _svst3_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst3_f64( + svget3_f64::<0>(data), + svget3_f64::<1>(data), + svget3_f64::<2>(data), + pg.into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")] + fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst3_s8( + svget3_s8::<0>(data), + svget3_s8::<1>(data), + svget3_s8::<2>(data), + pg, + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")] + fn _svst3_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst3_s16( + svget3_s16::<0>(data), + svget3_s16::<1>(data), + svget3_s16::<2>(data), + pg.into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")] + fn _svst3_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst3_s32( + svget3_s32::<0>(data), + svget3_s32::<1>(data), + svget3_s32::<2>(data), + pg.into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2i64")] + fn _svst3_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst3_s64( + svget3_s64::<0>(data), + svget3_s64::<1>(data), + svget3_s64::<2>(data), + pg.into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { + svst3_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { + svst3_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { + svst3_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) { + svst3_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) { + svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) { + svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) { + svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) { + svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) { + svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) { + svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) { + svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) { + svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) { + svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) { + svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")] + fn _svst4_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + data3: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst4_f32( + svget4_f32::<0>(data), + svget4_f32::<1>(data), + svget4_f32::<2>(data), + svget4_f32::<3>(data), + pg.into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")] + fn _svst4_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + data3: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst4_f64( + svget4_f64::<0>(data), + svget4_f64::<1>(data), + svget4_f64::<2>(data), + svget4_f64::<3>(data), + pg.into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")] + fn _svst4_s8( + data0: svint8_t, + data1: svint8_t, + data2: svint8_t, + data3: svint8_t, + pg: svbool_t, + ptr: *mut i8, + ); + } + _svst4_s8( + svget4_s8::<0>(data), + svget4_s8::<1>(data), + svget4_s8::<2>(data), + svget4_s8::<3>(data), + pg, + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")] + fn _svst4_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + data3: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst4_s16( + svget4_s16::<0>(data), + svget4_s16::<1>(data), + svget4_s16::<2>(data), + svget4_s16::<3>(data), + pg.into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")] + fn _svst4_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + data3: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst4_s32( + svget4_s32::<0>(data), + svget4_s32::<1>(data), + svget4_s32::<2>(data), + svget4_s32::<3>(data), + pg.into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
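+// Reviewer sketch (illustrative only, not generated code): the st2/st3/st4 wrappers in this
+// file all follow the same pattern, so a hedged usage example may help. Assuming an all-true
+// predicate from `svptrue_b32` and an `svcreate3_f32` tuple constructor elsewhere in this
+// module (both assumptions), storing three planes interleaved could look like:
+//
+//     // let pg = svptrue_b32();
+//     // let rgb = svcreate3_f32(r, g, b);      // pack three vectors into a tuple
+//     // svst3_f32(pg, out, rgb);               // memory layout: r0, g0, b0, r1, g1, b1, ...
+//
+// Each wrapper just splits the tuple with svget3_*/svget4_* and forwards the parts, plus the
+// predicate, to the corresponding llvm.aarch64.sve.st3.*/st4.* intrinsic.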
+#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")] + fn _svst4_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + data3: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst4_s64( + svget4_s64::<0>(data), + svget4_s64::<1>(data), + svget4_s64::<2>(data), + svget4_s64::<3>(data), + pg.into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { + svst4_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { + svst4_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { + svst4_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { + svst4_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"] +#[doc = ""] +#[doc = "## 
Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) { + svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) { + svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) { + svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) { + svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) { + svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) { + svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) { + svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) { + svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) { + svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) { + svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")] + fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svstnt1_f32(data, pg.into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")] + fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svstnt1_f64(data, pg.into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")] + fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svstnt1_s8(data, pg, base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv8i16")] + fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svstnt1_s16(data, pg.into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")] + fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svstnt1_s32(data, pg.into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")] + fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svstnt1_s64(data, pg.into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svstnt1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] 
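+// Reviewer note (hedged, descriptive only): the unsigned svstnt1_u* wrappers here carry no
+// LLVM bindings of their own. Because a store only copies bit patterns, each one reinterprets
+// its pointer and data as the same-width signed types via `as_signed()` and delegates to the
+// matching svstnt1_s* implementation, e.g.:
+//
+//     // svstnt1_u16(pg, base, data)  ==>  svstnt1_s16(pg, base.as_signed(), data.as_signed())
+//
+// The unsigned st2/st3/st4 wrappers earlier in the file use the same delegation pattern.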
+#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svstnt1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svstnt1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svstnt1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = 
"Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svstnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(pg.into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(pg.into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { 
+ svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(pg.into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(pg.into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(pg.into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, 
svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: 
svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] 
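+#[doc = ""]
+#[doc = "## Example (illustrative)"]
+#[doc = ""]
+#[doc = "A minimal usage sketch of the predication suffixes, assuming an SVE-capable"]
+#[doc = "target and that `svptrue_b32` is available alongside the intrinsics in this"]
+#[doc = "module (it is not shown in this hunk):"]
+#[doc = ""]
+#[doc = "```ignore"]
+#[doc = "let pg = svptrue_b32();            // every lane active"]
+#[doc = "let v  = svdup_n_u32(10);"]
+#[doc = "let m  = svsub_n_u32_m(pg, v, 3);  // _m: inactive lanes keep op1"]
+#[doc = "let x  = svsub_n_u32_x(pg, v, 3);  // _x: inactive lanes are unspecified"]
+#[doc = "let z  = svsub_n_u32_z(pg, v, 3);  // _z: inactive lanes are zeroed"]
+#[doc = "```"]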
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsubr_f32_m(pg.into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsubr_f64_m(pg.into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsubr_s8_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsubr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsubr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsubr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
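+#[doc = ""]
+#[doc = "## Example (illustrative)"]
+#[doc = ""]
+#[doc = "A minimal sketch of the reversed operand order (each active lane computes"]
+#[doc = "`op2 - op1`), assuming `svptrue_b64` is available alongside the intrinsics"]
+#[doc = "in this module:"]
+#[doc = ""]
+#[doc = "```ignore"]
+#[doc = "let pg = svptrue_b64();"]
+#[doc = "let x  = svdup_n_s64(3);"]
+#[doc = "let r  = svsubr_n_s64_z(pg, x, 10); // active lanes: 10 - 3 = 7; inactive lanes: 0"]
+#[doc = "```"]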
+#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsubr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) 
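+    // NOTE: like the other `_x` wrappers in this file, the "don't care" form is
+    // lowered through the `_m` (merging) form; inactive lanes may take any value,
+    // so reusing the merging intrinsic is sufficient.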
+} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsubr_u64_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))]
+pub fn svsudot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svuint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32"
+        )]
+        fn _svsudot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) }
+}
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t {
+    svusdot_s32(op1, op3, op2)
+}
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t {
+    svsudot_s32(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")]
+        fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svtbl_f32(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")]
+        fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svtbl_f64(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")]
+        fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtbl_s8(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")]
+        fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtbl_s16(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")]
+        fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtbl_s32(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")]
+        fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtbl_s64(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t {
+    unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t {
+    unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Trigonometric multiply-add coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
+pub fn svtmad_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32"
+        )]
+        fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svtmad_f32(op1, op2, IMM3) }
+}
+#[doc = "Trigonometric multiply-add coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
+pub fn svtmad_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64"
+        )]
+        fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svtmad_f64(op1, op2, IMM3) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")]
+        fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svtrn1_b8(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"]
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")] + fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn1_b16(op1.into(), op2.into()).into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")] + fn _svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn1_b32(op1.into(), op2.into()).into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")] + fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn1_b64(op1.into(), op2.into()).into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")] + fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1_f32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")] + fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1_f64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")] + fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1_s8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.trn1.nxv8i16")] + fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1_s16(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")] + fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1_s32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")] + fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1_s64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")] + fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1q_f32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")] + fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1q_f64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")] + fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1q_s8(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")] + fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1q_s16(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")] + fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1q_s32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")] + fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1q_s64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave 
even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")] + fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn2_b8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")] + fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn2_b16(op1.into(), op2.into()).into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")] + fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn2_b32(op1.into(), op2.into()).into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")] + fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn2_b64(op1.into(), op2.into()).into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")] + fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { 
_svtrn2_f32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")] + fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2_f64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")] + fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2_s8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")] + fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2_s16(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")] + fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2_s32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")] + fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2_s64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } 
+} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")] + fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2q_f32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")] + fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2q_f64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv16i8")] + fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2q_s8(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")] + fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2q_s16(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")] + fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) 
-> svint32_t; + } + unsafe { _svtrn2q_s32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")] + fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2q_s64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Trigonometric starting value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32" + )] + fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtsmul_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric starting value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64" + )] + fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtsmul_f64(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32" + )] + fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtssel_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64" + )] + fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtssel_f64(op1, op2.as_signed()) } +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_f32() -> svfloat32x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_f64() -> svfloat64x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_s8() -> svint8x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_s16() -> svint16x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_s32() -> svint32x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This 
creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_s64() -> svint64x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_u8() -> svuint8x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_u16() -> svuint16x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_u32() -> svuint32x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef2_u64() -> svuint64x2_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_f32() -> svfloat32x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_f64() -> svfloat64x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_s8() -> svint8x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] +#[doc = ""] 
+#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_s16() -> svint16x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_s32() -> svint32x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_s64() -> svint64x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_u8() -> svuint8x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_u16() -> svuint16x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_u32() -> svuint32x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef3_u64() -> svuint64x3_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_f32() -> svfloat32x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_f64() -> svfloat64x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_s8() -> svint8x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_s16() -> svint16x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_s32() -> svint32x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_s64() -> svint64x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_u8() -> svuint8x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_u16() -> svuint16x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_u32() -> svuint32x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized tuple of four 
vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef4_u64() -> svuint64x4_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_f32() -> svfloat32_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_f64() -> svfloat64_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_s8() -> svint8_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_s16() -> svint16_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_s32() -> svint32_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_s64() -> svint64_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline] +#[target_feature(enable = "sve")] +pub unsafe fn svundef_u8() -> svuint8_t { + simd_reinterpret(()) +} +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u16() -> svuint16_t {
+ simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u32() -> svuint32_t {
+ simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u64() -> svuint64_t {
+ simd_reinterpret(())
+}
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))]
+pub fn svusdot_lane_s32<const IMM_INDEX: i32>(
+ op1: svint32_t,
+ op2: svuint8_t,
+ op3: svint8_t,
+) -> svint32_t {
+ static_assert_range!(IMM_INDEX, 0, 3);
+ unsafe extern "C" {
+ #[cfg_attr(
+ target_arch = "aarch64",
+ link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32"
+ )]
+ fn _svusdot_lane_s32(
+ op1: svint32_t,
+ op2: svint8_t,
+ op3: svint8_t,
+ imm_index: i32,
+ ) -> svint32_t;
+ }
+ unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) }
+}
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")]
+ fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+ }
+ unsafe { _svusdot_s32(op1, op2.as_signed(), op3) }
+}
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t {
+ svusdot_s32(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Matrix multiply-accumulate (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usmmla))]
+pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
+ unsafe extern "C" {
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usmmla.nxv4i32")]
+ fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+ }
+ unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) }
+}
+#[doc = 
"Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")] + fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp1_b8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")] + fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp1_b16(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")] + fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp1_b32(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")] + fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp1_b64(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")] + fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1_f32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")] + fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1_f64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")] + fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1_s8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")] + fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1_s16(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")] + fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1_s32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")] + fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1_s64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1_s64(op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")] + fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1q_f32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")] + fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1q_f64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")] + fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1q_s8(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")] + fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1q_s16(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")] + fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1q_s32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")] + fn _svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1q_s64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")] + fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp2_b8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")] + fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp2_b16(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")] + fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp2_b32(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b64(op1: svbool_t, op2: 
svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")] + fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp2_b64(op1.into(), op2.into()).into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")] + fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2_f32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")] + fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2_f64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")] + fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2_s8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")] + fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2_s16(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")] + fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2_s32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")] + fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2_s64(op1, op2) } +} +#[doc = "Concatenate 
odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")] + fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2q_f32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")] + fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2q_f64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")] + fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2q_s8(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, 
assert_instr(uzp2))] +pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")] + fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2q_s16(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")] + fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2q_s32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")] + fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2q_s64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32" + 
)] + fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32" + )] + fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32" + )] + fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32" + )] + fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64" + )] + fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64" + )] + fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64" + )] + fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { 
_svwhilele_b32_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64" + )] + fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32" + )] + fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32" + )] + fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32" + )] + fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32" + )] + fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64" + )] + fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t; + 
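+ // Note: the unsigned "while less-than-or-equal" wrappers lower to the WHILELS
+ // instruction (see the `llvm.aarch64.sve.whilels.*` link name above). The public
+ // function passes its `u64` operands to the LLVM declaration as same-width signed
+ // values via `as_signed()` below; this is a bit-level reinterpretation only and does
+ // not affect the unsigned comparison performed by the instruction.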
} + unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64" + )] + fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64" + )] + fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64" + )] + fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32" + )] + fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32" + )] + fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32" + )] + fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { 
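+ // Illustrative sketch (not part of the generated bindings): the `svwhilelt_*`
+ // predicate generators are typically used to drive a strip-mined SVE loop,
+ // assuming the usual ACLE helpers such as `svcntw` are also available from this
+ // module:
+ //
+ //     let mut i: i32 = 0;
+ //     while i < len {
+ //         let pg = svwhilelt_b32_s32(i, len);
+ //         // ... predicated loads, stores and arithmetic governed by `pg` ...
+ //         i += svcntw() as i32;
+ //     }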
_svwhilelt_b32_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32" + )] + fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s32(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64" + )] + fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64" + )] + fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64" + )] + fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64" + )] + fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s64(op1, op2).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32" + )] + fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32" + )] + fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32" + )] + fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32" + )] + fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64" + )] + fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64" + )] + fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64" + )] + fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64" + )] + fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Write to the first-fault register"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(wrffr))] +pub fn svwrffr(op: svbool_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")] + fn _svwrffr(op: svbool_t); + } + unsafe { _svwrffr(op) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")] + fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip1_b8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")] + fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip1_b16(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")] + fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip1_b32(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")] + fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip1_b64(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { 
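+ // Note: `svzip1_*` interleaves elements taken from the low halves of the two inputs,
+ // i.e. result = { op1[0], op2[0], op1[1], op2[1], ... }; the matching `svzip2_*`
+ // functions further below apply the same interleave to the high halves.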
+ unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")] + fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1_f32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")] + fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1_f64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")] + fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1_s8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")] + fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1_s16(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")] + fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1_s32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")] + fn _svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1_s64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")] + fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")] + fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")] + fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")] + fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")] + fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")] + fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")] + fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip2_b8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(zip2))] +pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")] + fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip2_b16(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")] + fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip2_b32(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")] + fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip2_b64(op1.into(), op2.into()).into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")] + fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2_f32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")] + fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2_f64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")] + fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2_s8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.zip2.nxv8i16")] + fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2_s16(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")] + fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2_s32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")] + fn _svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2_s64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")] + fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of 
two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")] + fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")] + fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")] + fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")] + fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")] + fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> 
svuint16_t { + unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs new file mode 100644 index 0000000000000..ebe6fede8b678 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs @@ -0,0 +1,24008 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// ``` +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")] + fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_s8(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svaba_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")] + fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_s16(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svaba_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")] + fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_s32(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svaba_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")] + fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_s64(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svaba_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")] + fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svaba_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")] + fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svaba_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")] + fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svaba_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")] + fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svaba_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")] + fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")] + fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")] + fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")] + fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")] + fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")] + fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")] + fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")] + fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(sabalt))] +pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")] + fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")] + fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")] + fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")] + fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")] + fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_s16(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")] + fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_s32(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")] + fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_s64(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")] + fn _svabdlb_u16(op1: 
svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")] + fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")] + fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")] + fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_s16(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")] + fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_s32(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")] + fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_s64(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")] + fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")] + fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute 
difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")] + fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")] + fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")] + fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")] + fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")] + fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")] + fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: 
svint16_t) -> svint32_t; + } + unsafe { _svadalp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")] + fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")] + fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")] + fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")] + fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")] + fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")] + fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnb_s16(op1, op2) } +} +#[doc = "Add 
narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svaddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")] + fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnb_s32(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svaddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")] + fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnb_s64(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svaddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")] + fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnt_s16(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svaddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")] + fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnt_s32(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svaddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")] + fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: 
svint64_t) -> svint32_t; + } + unsafe { _svaddhnt_s64(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svaddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")] + fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_s16(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")] + fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_s32(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")] + fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_s64(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")] + fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")] + fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { 
_svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")] + fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv8i16" + )] + fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlbt_s16(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv4i32" + )] + fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlbt_s32(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv2i64" + )] + fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlbt_s64(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")] + fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_s16(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")] + fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_s32(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")] + fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_s64(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" 
{ + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")] + fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")] + fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")] + fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")] + fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svaddp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svaddp_f32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")] + fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svaddp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svaddp_f64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")] + fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaddp_s8_m(pg, op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svaddp_s8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")] + fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svaddp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svaddp_s16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")] + fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svaddp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svaddp_s32_m(pg, op1, op2) 
+} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")] + fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svaddp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svaddp_s64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svaddp_u8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svaddp_u16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svaddp_u32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: 
svuint64_t) -> svuint64_t { + unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svaddp_u64_m(pg, op1, op2) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")] + fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_s16(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")] + fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_s32(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")] + fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_s64(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uaddwb.nxv8i16")] + fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")] + fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")] + fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")] + fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_s16(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn 
svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")] + fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_s32(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")] + fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_s64(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")] + fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")] + fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")] + fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "AES single round decryption"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesd))] +pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")] + fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES single round encryption"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aese))] +pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")] + fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES inverse mix columns"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesimc))] +pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")] + fn _svaesimc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "AES mix columns"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesmc))] +pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")] + fn _svaesmc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")] + fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbcax_s8(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbcax_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")] + fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbcax_s16(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbcax_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")] + fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbcax_s32(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbcax_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")] + fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbcax_s64(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbcax_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + 
unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbcax_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbcax_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbcax_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbcax_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")] + fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbdep_u8(op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbdep_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")] + fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbdep_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")] + fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbdep_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")] + fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbdep_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Gather lower bits from positions selected by 
bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")] + fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbext_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")] + fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbext_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")] + fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbext_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")] + fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbext_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")] + fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbgrp_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")] + fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbgrp_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")] + fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbgrp_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Group bits to right or left as selected by 
bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")] + fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbgrp_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")] + fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl1n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl1n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")] + fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl1n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl1n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")] + fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl1n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with first 
input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl1n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")] + fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl1n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl1n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl1n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl1n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input 
inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl1n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl1n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")] + fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl2n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl2n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")] + fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl2n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl2n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")] + fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl2n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl2n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")] + fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl2n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl2n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl2n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl2n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl2n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl2n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")] + fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")] + fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(bsl))] +pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")] + fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")] + fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl_s32(op1.as_signed(), 
op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")] + fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")] + fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")] + fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")] + fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32" + )] + fn _svcdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 
+ ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64" + )] + fn _svcdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")] + fn _svcdot_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")] + fn _svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16" + )] + fn _svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32" + )] + fn _svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe {
_svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s16::<IMM_INDEX, IMM_ROTATION>( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s32::<IMM_INDEX, IMM_ROTATION>( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")] + fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s16<const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")] + fn _svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")] + fn _svcmla_s32( + op1: svint32_t, + op2: svint32_t, +
op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")] + fn _svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u8<const IMM_ROTATION: i32>( + op1: svuint8_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u16<const IMM_ROTATION: i32>( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u32<const IMM_ROTATION: i32>( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u64<const IMM_ROTATION: i32>( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Up convert long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t,
op: svfloat32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")] + fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) + -> svfloat64_t; + } + unsafe { _svcvtlt_f64_f32_m(inactive, pg.into(), op) } +} +#[doc = "Up convert long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvtlt_f64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")] + fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtnt_f32_f64_m(even, pg.into(), op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtnt_f32_f64_m(even, pg, op) +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")] + fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtx_f32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvtx_f32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")] + fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtxnt_f32_f64_m(even, pg.into(), op) } +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtxnt_f32_f64_m(even, pg, op) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")] + fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _sveor3_s8(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + sveor3_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")] + fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _sveor3_s16(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + sveor3_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")] + fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _sveor3_s32(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + sveor3_s32(op1, op2, svdup_n_s32(op3)) +} 
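+// Usage sketch (illustrative only; `a`, `b`, and `c` below are assumed `svint32_t`
+// values obtained elsewhere): the `_n_` variants broadcast their scalar operand with
+// `svdup_n_*` and forward to the vector form, so `sveor3_n_s32(a, b, 7)` computes the
+// same three-way XOR as `sveor3_s32(a, b, svdup_n_s32(7))`, i.e. `a ^ b ^ 7` per lane.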
+#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")] + fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _sveor3_s64(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + sveor3_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + sveor3_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + sveor3_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + sveor3_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + sveor3_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")] + fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveorbt_s8(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveorbt_s8(odd, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")] + fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveorbt_s16(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveorbt_s16(odd, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")] + fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveorbt_s32(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveorbt_s32(odd, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")] + fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveorbt_s64(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveorbt_s64(odd, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveorbt_u8(odd, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveorbt_u16(odd, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveorbt_u32(odd, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveorbt_u64(odd, op1, svdup_n_u64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")] + fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveortb_s8(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveortb_s8(even, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")] + fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveortb_s16(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveortb_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")] + fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { 
_sveortb_s32(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveortb_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")] + fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveortb_s64(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveortb_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveortb_u8(even, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveortb_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveortb_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveortb_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")] + fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_s8_m(pg, op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(shadd))] +pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")] + fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")] + fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")] + fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving add"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")] + fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")] + fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")] + fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")] + fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t { + 
unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv4i32" + )] + fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhistcnt_s32_z(pg.into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv2i64" + )] + fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhistcnt_s64_z(pg.into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histseg.nxv16i8" + )] + fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhistseg_s8(op1, op2).as_unsigned() } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")] + fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn 
svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")] + fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn 
svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")] + fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")] + fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_s64_m(pg.into(), op1, op2) } +} 
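+// Illustrative sketch only (not part of the ACLE surface): how the `_m`, `_z`
+// and `_x` predication forms of the halving subtract differ. For lanes where
+// `pg` is false, `_m` keeps the corresponding element of `op1`, `_z` yields
+// zero, and `_x` may return any value. The helper name below is hypothetical.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+fn __hsub_predication_demo(pg: svbool_t, a: svint32_t, b: svint32_t) -> svint32_t {
+    // Merging: inactive lanes hold the corresponding element of `a`.
+    let merged = svhsub_s32_m(pg, a, b);
+    // Zeroing: inactive lanes are forced to zero.
+    let zeroed = svhsub_s32_z(pg, a, b);
+    // "Don't care": inactive lanes are unspecified; only active lanes matter.
+    let _relaxed = svhsub_s32_x(pg, a, b);
+    // Recombine the two well-defined results with a predicated select.
+    svsel_s32(pg, merged, zeroed)
+}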
+#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")] + fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + 
svhsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")] + fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uhsub))] +pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")] + fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")] + fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn 
svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")] + fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")] + fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")] + fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")] + fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")] + fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_m(pg: 
svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")] + fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")] + fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")] + fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} 
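+// Illustrative sketch only (assumed semantics, not ACLE text): `svhsubr` is the
+// operand-reversed form, so active lanes compute the halved difference of
+// `op2 - op1`, while merging still preserves `op1` in inactive lanes. The
+// helper name below is hypothetical.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+fn __hsubr_reversed_demo(pg: svbool_t, a: svuint32_t, b: svuint32_t) -> svuint32_t {
+    // Reversed subtract: active lanes hold the halved `b - a`, inactive lanes keep `a`.
+    let reversed = svhsubr_u32_m(pg, a, b);
+    // Swapping the operands of the plain form matches on active lanes, but
+    // inactive lanes would keep `b` instead, so the two are not identical.
+    let swapped = svhsub_u32_m(pg, b, a);
+    // Take the reversed result where the predicate is true, `swapped` elsewhere.
+    svsel_u32(pg, reversed, swapped)
+}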
+#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64" + )] + fn _svldnt1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64" + )] + fn _svldnt1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = 
"Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64" + )] + fn _svldnt1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64" + )] + fn _svldnt1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), 
offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32" + )] + fn _svldnt1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldnt1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32" + )] + fn _svldnt1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldnt1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> 
svuint32_t { + svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation 
for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldnt1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldnt1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldnt1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldnt1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svldnt1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn 
_svldnt1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldnt1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + 
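// Delegates to the signed-result variant: the gathered i16 elements are sign-extended to 64 bits either way, and `.as_unsigned()` only reinterprets the element type of the result, leaving the underlying bits intact. +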
svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldnt1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" 
+ )] + fn _svldnt1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldnt1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svldnt1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast(_svldnt1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, 
+ offset: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svldnt1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast(_svldnt1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + 
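// `bases` carries one 64-bit address per active lane and `offset` is a single byte displacement added to each of them; the load itself is performed by the signed-result variant and the result is then reinterpreted as u64 elements. +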
svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn 
svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe 
fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub 
unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldnt1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_u64( + pg: svbool_t, + base: 
*const i32, + indices: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast::( + _svldnt1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svldnt1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldnt1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_s64( + pg: svbool_t, + base: 
*const u8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## 
Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast::( + _svldnt1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch 
= "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast::( + _svldnt1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast::( + _svldnt1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), 
offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn 
svldnt1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) 
-> svuint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + 
svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")] + fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svlogb_f32_m(inactive, pg.into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svlogb_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svlogb_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")] + fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svlogb_f64_m(inactive, pg.into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svlogb_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svlogb_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")] + fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svmatch_s8(pg, op1, op2) } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")] + fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svmatch_s16(pg.into(), op1, op2).into() } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32" + )] + fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnmp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn 
svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnmp_f32_m(pg, op1, op2) +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64" + )] + fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnmp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnmp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")] + fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxp_f32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")] + fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")] + fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe 
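+    // 8-bit elements use the full 16-lane `svbool_t` predicate directly, so `pg`
+    // is passed to the LLVM intrinsic without a predicate conversion.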
{ _svmaxp_s8_m(pg, op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmaxp_s8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")] + fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmaxp_s16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")] + fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmaxp_s32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")] + fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmaxp_s64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe 
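+    // The unsigned form shares the signed register layout: operands go to the
+    // `umaxp` LLVM intrinsic via `as_signed()` and the result is restored with
+    // `as_unsigned()`.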
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")] + fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmaxp_u8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")] + fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmaxp_u16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")] + fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmaxp_u32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")] + fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + 
svmaxp_u64_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv4f32" + )] + fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnmp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnmp_f32_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv2f64" + )] + fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnmp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnmp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")] + fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminp_f32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")] + fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { 
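+        // `pg.into()` narrows the `svbool_t` predicate to the `svbool2_t` form
+        // expected by the 64-bit-element (`nxv2f64`) LLVM intrinsic.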
_svminp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")] + fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_s8_m(pg, op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svminp_s8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")] + fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svminp_s16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")] + fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svminp_s32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")] + fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svminp_s64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")] + fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svminp_u8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")] + fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svminp_u16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")] + fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svminp_u32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")] + fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svminp_u64_m(pg, op1, op2) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv8i16" + )] + fn _svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv4i32" + )] + fn _svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv2i64" + )] + fn _svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe { + svmla_lane_s16::(op1.as_signed(), op2.as_signed(), 
op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { + svmla_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { + svmla_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")] + fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")] + fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")] + fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")] + fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")] + fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")] + fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32" + )] + fn 
_svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32" + )] + fn _svmlalt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")] + fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")] + fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")] + fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")] + fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")] + fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")] + fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv8i16" + )] + fn _svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmls_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv4i32" + )] + fn _svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv2i64" + )] + fn _svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe { + svmls_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { + svmls_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { + svmls_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_u32( + op1: svint32_t, + op2: 
svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")] + fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")] + fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")] + fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc 
= ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")] + fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")] + fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")] + fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")] + fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")] + fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")] + fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")] + fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(umlslt))] +pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")] + fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")] + fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s16(op: svint8_t) -> svint16_t { + svshllb_n_s16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s32(op: svint16_t) -> svint32_t { + svshllb_n_s32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s64(op: svint32_t) -> svint64_t { + svshllb_n_s64::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { + svshllb_n_u16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { + svshllb_n_u32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { + svshllb_n_u64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s16(op: svint8_t) -> svint16_t { + svshllt_n_s16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s32(op: svint16_t) -> svint32_t { + svshllt_n_s32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s64(op: svint32_t) -> svint64_t { + svshllt_n_s64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { + svshllt_n_u16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { + svshllt_n_u32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { + svshllt_n_u64::<0>(op) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32" + )] + fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64" + )] + fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc 
= ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv8i16" + )] + fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv4i32" + )] + fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv2i64" + )] + fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe { svmul_lane_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svmul_lane_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svmul_lane_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> 
svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32" + )] + fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64" + )] + fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32" + )] + fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64" + )] + fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")] + fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_s16(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s32(op1: svint16_t, 
op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv4i32")] + fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_s32(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")] + fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_s64(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")] + fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")] + fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")] + fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32" + )] + fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64" + )] + fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32" + )] + fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64" + )] + fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] + fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_s16(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] + fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_s32(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] + fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_s64(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] + fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc 
= "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] + fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] + fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")] + fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svnbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svnbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")] + fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svnbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svnbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")] + fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svnbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svnbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")] + fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svnbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svnbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svnbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t 
{ + svnbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svnbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svnbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")] + fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svnmatch_s8(pg, op1, op2) } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")] + fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svnmatch_s16(pg.into(), op1, op2).into() } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> 
svbool_t { + unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Polynomial multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")] + fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmul_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8" + )] + fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullb_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullb_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32" + )] + fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullb_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64" + )] + fn 
_svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullb_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { simd_reinterpret(svpmullb_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(svpmullb_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8" + )] + fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullt_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32" + )] + fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullt_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64" + )] + fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullt_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { simd_reinterpret(svpmullt_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(svpmullt_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_m(inactive: svint8_t, pg: 
svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")] + fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqabs_s8_m(inactive, pg, op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")] + fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqabs_s16_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")] + fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqabs_s32_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_z(pg: svbool_t, op: 
svint32_t) -> svint32_t { + svqabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")] + fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqabs_s64_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")] + fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8_m(pg, op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")] + fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")] + fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")] + fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")] + fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")] + fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + 
unsafe { _svqadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")] + fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")] + fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8" + )] + fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16" + )] + fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32" + )] + fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64" + )] + fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32" + )] + fn _svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +}
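// The IMM_ROTATION / IMM_INDEX immediates on the intrinsics above are const
// generics: the caller supplies them in angle brackets and static_assert! /
// static_assert_range! reject out-of-range values at monomorphisation time.
// The helper below is a hypothetical editorial sketch (not part of this patch)
// showing the calling convention; it assumes the same "sve,sve2" gate as the
// intrinsics it wraps.
#[allow(dead_code)]
#[target_feature(enable = "sve,sve2")]
fn qcadd_example(acc: svint16_t, delta: svint16_t) -> svint16_t {
    // Rotation must be 90 or 270; a lane form is used the same way, e.g.
    // svqdmlalb_lane_s32::<0>(acc32, a16, b16).
    svqcadd_s16::<90>(acc, delta)
}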
+#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64" + )] + fn _svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16" + )] + fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32" + )] + fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64" + )] + fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] +#[inline]
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16" + )] + fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32" + )] + fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv2i64" + )] + fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32" + )] + fn _svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64" + )] + fn _svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16" + )] + fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32" + )] + fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalt_s32(op1, op2, svdup_n_s16(op3)) +}
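// The *b ("bottom") and *t ("top") forms consume the even- and odd-numbered
// elements of the narrow operands respectively, so chaining both accumulates a
// saturating doubling multiply-add over every narrow lane into the wide
// accumulator. The helper below is a hypothetical editorial sketch (not part of
// this patch) illustrating that pairing.
#[allow(dead_code)]
#[target_feature(enable = "sve,sve2")]
fn qdmlal_all_lanes(acc: svint32_t, a: svint16_t, b: svint16_t) -> svint32_t {
    // acc[i] += sat(2 * a[2i] * b[2i]); then the same for the odd (top) lanes.
    let acc = svqdmlalb_s32(acc, a, b);
    svqdmlalt_s32(acc, a, b)
}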
+#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64" + )] + fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32" + )] + fn _svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64" + )] + fn _svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16" + )] + fn _svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long 
(bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32" + )] + fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64" + )] + fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16" + )] + fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + 
link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32" + )] + fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64" + )] + fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32" + )] + fn _svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64" + )] + fn _svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> 
svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16" + )] + fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32" + )] + fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64" + )] + fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16" + )] + fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32" + )] + fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64" + )] + fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8" + )] + fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqdmulh_s8(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16" + )] + fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqdmulh_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqdmulh.nxv4i32" + )] + fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqdmulh_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64" + )] + fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqdmulh_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32" + )] + fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64" + )] + fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16" + )] + fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullb_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32" + )] + fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullb_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64" + )] + fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullb_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32" + )] + fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64" + )] + fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + 
unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16" + )] + fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullt_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32" + )] + fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullt_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64" + )] + fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullt_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")] + fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqneg_s8_m(inactive, pg, op) } +} +#[doc = "Saturating negate"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")] + fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqneg_s16_m(inactive, pg.into(), op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")] + fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqneg_s32_m(inactive, pg.into(), op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: 
svint64_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")]
+        fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqneg_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqneg_s64_m(op, pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqneg_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16"
+        )]
+        fn _svqrdcmlah_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32"
+        )]
+        fn _svqrdcmlah_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
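+// Editorial note (sketch, not normative): the complex multiply-add intrinsics take the
+// rotation in degrees as a const generic, restricted to 0, 90, 180 or 270 by the
+// `static_assert!` above. For example, with assumed bindings `acc`, `a`, `b: svint16_t`:
+//
+//     let r = svqrdcmlah_s16::<90>(acc, a, b);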
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>(
+    op1: svint8_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint8_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8"
+        )]
+        fn _svqrdcmlah_s8(
+            op1: svint8_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_rotation: i32,
+        ) -> svint8_t;
+    }
+    unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16"
+        )]
+        fn _svqrdcmlah_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32"
+        )]
+        fn _svqrdcmlah_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64"
+        )]
+        fn _svqrdcmlah_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            imm_rotation: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16"
+        )]
+        fn _svqrdmlah_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
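+// Editorial note (sketch): the permitted lane index shrinks with element width because it
+// selects a lane within one 128-bit segment: 0..=7 for 16-bit elements, 0..=3 for 32-bit
+// and 0..=1 for 64-bit. For example, lane 1 of each 64-bit pair, with assumed
+// `svint64_t` bindings `acc`, `a` and `b`:
+//
+//     let r = svqrdmlah_lane_s64::<1>(acc, a, b);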
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32"
+        )]
+        fn _svqrdmlah_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64"
+        )]
+        fn _svqrdmlah_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8"
+        )]
+        fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrdmlah_s8(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svqrdmlah_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16"
+        )]
+        fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrdmlah_s16(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svqrdmlah_s16(op1, op2, svdup_n_s16(op3))
+}
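+// Editorial note (sketch): the `_n_` forms are thin wrappers that broadcast the scalar
+// operand with `svdup_n_*` before calling the vector form, so the two calls below are
+// equivalent (assumed bindings `a`, `b: svint16_t`):
+//
+//     let r1 = svqrdmlah_n_s16(a, b, 7);
+//     let r2 = svqrdmlah_s16(a, b, svdup_n_s16(7));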
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32"
+        )]
+        fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrdmlah_s32(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svqrdmlah_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64"
+        )]
+        fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrdmlah_s64(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svqrdmlah_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
+pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16"
+        )]
+        fn _svqrdmlsh_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
+pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32"
+        )]
+        fn _svqrdmlsh_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64" + )] + fn _svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8" + )] + fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlsh_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svqrdmlsh_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16" + )] + fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svqrdmlsh_s16(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svqrdmlsh_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32" + )] + fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svqrdmlsh_s32(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's 
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svqrdmlsh_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64"
+        )]
+        fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrdmlsh_s64(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svqrdmlsh_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16"
+        )]
+        fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32"
+        )]
+        fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64"
+        )]
+        fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8" + )] + fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrdmulh_s8(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqrdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16" + )] + fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrdmulh_s16(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqrdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32" + )] + fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrdmulh_s32(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqrdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64" + )] + fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrdmulh_s64(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqrdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")] + fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")] + fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")] + fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn 
svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")] + fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] 
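+// NOTE (editorial): the unsigned svqrshl forms below lower to the
+// `llvm.aarch64.sve.uqrshl.*` intrinsics. The extern declarations are written
+// with signed vector types only, so the wrappers bit-cast the data operands
+// with `as_signed()` / `as_unsigned()`. The shift amount `op2` remains a signed
+// vector even for unsigned data: per Arm's documentation, positive elements
+// shift left and negative elements perform a rounding shift right.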
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")] + fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")] + fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding 
shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")] + fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")] + fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn 
svqrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16"
+        )]
+        fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32"
+        )]
+        fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64"
+        )]
+        fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16"
+        )]
+        fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32"
+        )]
+        fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64"
+        )]
+        fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16" + )] + fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32" + )] + fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64" + )] + fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16" + )] + fn _svqrshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32" + )] + fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64" + )] + fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")] + fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")] + fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")] + fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn 
svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")] + fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"] 
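+// NOTE (editorial): every `_n_` variant takes a scalar shift amount and simply
+// splats it with svdup_n_* before delegating to the vector form. Illustrative
+// equivalence (hypothetical values):
+//     svqshl_n_s64_z(pg, x, 2)  ==  svqshl_s64_z(pg, x, svdup_n_s64(2))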
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")] + fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")] + fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uqshl))] +pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")] + fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")] + fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn 
svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
+        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
+        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshlu_n_s16_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
+        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshlu_n_s32_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
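+    // "Don't care" (_x) predication may return any value in the inactive
+    // lanes, so this form simply reuses the merging (_m) implementation;
+    // IMM2 is a compile-time shift amount validated by static_assert_range
+    // inside the _m form.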
+ svqshlu_n_s32_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s32_z(pg: svbool_t, op1: svint32_t) -> svuint32_t { + svqshlu_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_m(pg: svbool_t, op1: svint64_t) -> svuint64_t { + static_assert_range!(IMM2, 0, 63); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")] + fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svqshlu_n_s64_m(pg.into(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_x(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_z(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16" + )] + fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32" + )] + fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqshrnb.nxv2i64" + )] + fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16" + )] + fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32" + )] + fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64" + )] + fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16" + )] + fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32" + )] + fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16"
+        )]
+        fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32"
+        )]
+        fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16"
+        )]
+        fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32"
+        )]
+        fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64"
+        )]
+        fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16"
+        )]
+        fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32"
+        )]
+        fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64"
+        )]
+        fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")]
+        fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsub_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")] + fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")] + fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { 
+ unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")] + fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")] + fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")] + fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) 
-> svuint16_t { + svqsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")] + fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")] + fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + 
svqsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")] + fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(sqsubr))] +pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")] + fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.sqsubr.nxv4i32")] + fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")] + fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")] + fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
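+// Predication forms, as implemented in this file: `_m` (merge) calls the LLVM
+// intrinsic directly, `_x` ("don't care") simply reuses `_m`, and `_z` (zeroing)
+// clears the inactive lanes of `op1` with `svsel` before merging. Sketch with
+// hypothetical values:
+//
+//     let z = svqsubr_u8_z(pg, a, b); // == svqsubr_u8_m(pg, svsel_u8(pg, a, svdup_n_u8(0)), b)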
+#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")] + fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")] + fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")] + fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")] + fn _svqxtnb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_s16(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")] + fn _svqxtnb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_s32(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s64(op: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")] + fn _svqxtnb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_s64(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")] + fn _svqxtnb_u16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")] + fn _svqxtnb_u32(op: svint32_t) -> svint16_t; + } + unsafe { 
_svqxtnb_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")] + fn _svqxtnb_u64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")] + fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_s16(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")] + fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_s32(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")] + fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_s64(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")] + fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")] + fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")] + fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16" + )] + fn _svqxtunb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunb_s16(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32" + )] + fn _svqxtunb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunb_s32(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64" + )] + fn _svqxtunb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunb_s64(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16" + )] + fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32" + )] + fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64" + )] + fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv8i16" + )] + fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnb_s16(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svraddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv4i32" + )] + fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnb_s32(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svraddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv2i64" + )] + fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnb_s64(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svraddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] 
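+// The unsigned raddhn{b,t} wrappers below reuse the signed ones through
+// `as_signed`/`as_unsigned`: taking the rounded high half of a sum is the same bit
+// pattern for signed and unsigned lanes, so one LLVM intrinsic per element width
+// suffices. Hypothetical example: `svraddhnb_u16(a, b)` returns the same lane bits
+// as `svraddhnb_s16` applied to the reinterpreted inputs.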
+pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv8i16" + )] + fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnt_s16(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svraddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv4i32" + )] + fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnt_s32(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svraddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv2i64" + )] + fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnt_s64(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svraddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")] + fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrax1_s64(op1, op2) } +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")] + fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(op, pg, op) +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.srhadd.nxv16i8")] + fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_s8_m(pg, op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")] + fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")] + fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, 
op2: i32) -> svint32_t { + svrhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")] + fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")] + fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_m(pg, op1, 
svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")] + fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(urhadd))] +pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")] + fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.urhadd.nxv2i64")] + fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")] + fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_s8_m(pg, op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")] + fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_s16_m(pg.into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} 
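The `_m`/`_x`/`_z` suffixes follow the ACLE predication convention: `_m` merges inactive lanes from the first operand, `_x` leaves them unspecified (implemented here by reusing the `_m` form), and `_z` zeroes them via `svsel` before operating. A minimal sketch of calling the three flavours follows; it assumes nightly Rust with the `stdarch_aarch64_sve` feature and an `svptrue_b16`-style all-true predicate constructor from the base SVE module, neither of which appears in this hunk.

```rust
use core::arch::aarch64::*;

// Sketch only: requires nightly with `stdarch_aarch64_sve`; `svptrue_b16` is
// an assumed all-true predicate helper from the base SVE module.
#[target_feature(enable = "sve,sve2")]
fn rounding_shift_flavours(x: svint16_t, shift: svint16_t) -> svint16_t {
    let pg: svbool_t = svptrue_b16();
    // _m: inactive lanes keep the corresponding lane of `x`.
    let _merged = svrshl_s16_m(pg, x, shift);
    // _x: inactive lanes are "don't care"; this wrapper reuses the _m form.
    let _dont_care = svrshl_s16_x(pg, x, shift);
    // _z: inactive lanes are zeroed (svsel against zero) before the shift.
    svrshl_s16_z(pg, x, shift)
}
```

With an all-true predicate the three variants produce identical results; they only differ in what ends up in inactive lanes.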
+#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")] + fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")] + fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")] + fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) 
+} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")] + fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")] + fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")] + fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { 
+    svrshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")]
+        fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshr_n_s8_m(pg, op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")]
+        fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_s16_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
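The `svrshr_n_*` wrappers above take their shift amount as a const generic `IMM2`, check it with `static_assert_range!`, and forward it to the LLVM intrinsic, so callers supply the immediate via turbofish and an out-of-range value is rejected at compile time. A minimal usage sketch follows under the same assumptions as the previous note (nightly with `stdarch_aarch64_sve`; `svptrue_b16` is assumed, not defined in this hunk).

```rust
use core::arch::aarch64::*;

// Sketch only: `IMM2` is a const generic, so a shift outside 1..=16 for
// 16-bit elements fails to compile. `svptrue_b16` is an assumed helper.
#[target_feature(enable = "sve,sve2")]
fn rounding_halve_active_lanes(x: svint16_t) -> svint16_t {
    let pg: svbool_t = svptrue_b16();
    // Shift right by 1 with rounding: roughly (x + 1) >> 1 per active lane;
    // inactive lanes keep their original value in the _m form.
    svrshr_n_s16_m::<1>(pg, x)
}
```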
+#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")] + fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_s32_m(pg.into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")] + fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_s64_m(pg.into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_m(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")] + fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_x<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_z<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, svsel_u8(pg, op1, svdup_n_u8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_m<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")]
+        fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_u16_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_x<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_z<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, svsel_u16(pg, op1, svdup_n_u16(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_m<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")]
+        fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshr_n_u32_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_x<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_z<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, svsel_u32(pg, op1, svdup_n_u32(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_m<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 1, 64);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")]
+        fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svrshr_n_u64_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_x<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    svrshr_n_u64_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_z<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    svrshr_n_u64_m::<IMM2>(pg, svsel_u64(pg, op1, svdup_n_u64(0)))
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")]
+        fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")]
+        fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")]
+        fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svrshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svrshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svrshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svrshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")]
+        fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")]
+        fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")]
+        fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svrshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_u32<const IMM2: i32>(even:
svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe { svrshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe { svrshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ursqrte.nxv4i32" + )] + fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(op, pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")] + fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")] + fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 
1))] +pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")] + fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")] + fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")] + fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")] + fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv4i32")] + fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")] + fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_u64(op1.as_signed(), 
op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16" + )] + fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnb_s16(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32" + )] + fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnb_s32(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64" + )] + fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnb_s64(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16" + )] + fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnt_s16(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32" + )] + fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { 
_svrsubhnt_s32(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64" + )] + fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnt_s64(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u64(even: svuint32_t, op1: 
svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv4i32")] + fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")] + fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")] + fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn 
svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")] + fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s16(op1: svint8_t) -> svint16_t { + static_assert_range!(IMM2, 0, 7); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")] + fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_s16(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s32(op1: svint16_t) -> svint32_t { + static_assert_range!(IMM2, 0, 15); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")] + fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_s32(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s64(op1: svint32_t) -> svint64_t { + static_assert_range!(IMM2, 0, 31); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")] + fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllb_n_s64(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u16(op1: svuint8_t) -> svuint16_t { + static_assert_range!(IMM2, 0, 7); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")] + fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u32(op1: svuint16_t) -> svuint32_t { + static_assert_range!(IMM2, 0, 15); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")] + fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u64(op1: svuint32_t) -> svuint64_t { + static_assert_range!(IMM2, 0, 31); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")] + fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s16(op1: svint8_t) -> svint16_t { + static_assert_range!(IMM2, 0, 7); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")] + fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllt_n_s16(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s32(op1: svint16_t) -> svint32_t { + static_assert_range!(IMM2, 0, 15); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")] + fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllt_n_s32(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s64(op1: svint32_t) -> svint64_t { + static_assert_range!(IMM2, 0, 31); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")] + fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllt_n_s64(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u16(op1: svuint8_t) -> svuint16_t { + static_assert_range!(IMM2, 0, 7); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")] + fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u32(op1: svuint16_t) -> svuint32_t { + static_assert_range!(IMM2, 0, 15); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")] + fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u64(op1: svuint32_t) -> svuint64_t { + static_assert_range!(IMM2, 0, 31); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")] + fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")] + fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnb_n_s16(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")] + fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnb_n_s32(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")] + fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnb_n_s64(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe { svshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, 
IMM2 = 1))] +pub fn svshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe { svshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe { svshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv8i16")] + fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")] + fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")] + fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe { svshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe { svshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe { svshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 0, 7); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")] + fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsli_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 0, 15); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")] + fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsli_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 0, 31); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")] + fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsli_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 0, 63); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")] + fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsli_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 0, 7); + unsafe { svsli_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 0, 15); + unsafe { svsli_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = 
"Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 0, 31); + unsafe { svsli_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 0, 63); + unsafe { svsli_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 encryption and decryption"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[cfg_attr(test, assert_instr(sm4e))] +pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")] + fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 key updates"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[cfg_attr(test, assert_instr(sm4ekey))] +pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")] + fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")] + fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")] + fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsqadd_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")] + fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsqadd_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")] + fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsqadd_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")] + fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")] + fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")] + fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")] + fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")] + fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")] + fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")] + fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")] + fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")] + fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsri_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")] + fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsri_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")] + fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsri_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")] + fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsri_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe { svsri_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe { svsri_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe { svsri_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe { svsri_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64" + )] + fn _svstnt1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_f64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64" + )] + fn _svstnt1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_s64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, 
+ data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64" + )] + fn _svstnt1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_f64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64" + )] + fn _svstnt1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_s64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] 
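+// Note: the unsigned variant below is a thin wrapper. The pointer and the data vector are +// reinterpreted as their signed counterparts (via `as_signed`) and the work is delegated to +// the signed implementation, which is bound to the same LLVM scatter intrinsic.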
+pub unsafe fn svstnt1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32" + )] + fn _svstnt1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32" + )] + fn _svstnt1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, 
base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + 
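// Call sketch (illustrative only; `bases` and `data` are placeholder values, and + // `svptrue_b32` is the usual ACLE all-true predicate helper, assumed to be available in + // these bindings): + // unsafe { svstnt1_scatter_u32base_u32(svptrue_b32(), bases, data) } + // The `u32base` form simply forwards to the `_offset` variant below with a zero byte offset. +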
svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + 
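// As with the 32-bit form above, this is sugar for the `_offset` variant with a zero byte + // offset. A hypothetical call with an all-true predicate (assuming `svptrue_b64` exists in + // these bindings) would be: + // unsafe { svstnt1_scatter_u64base_u64(svptrue_b64(), bases, data) } +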
svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8" + )] + fn _svstnt1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svstnt1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16" + )] + fn _svstnt1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svstnt1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32" + )] + fn _svstnt1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svstnt1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8" + )] + fn _svstnt1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svstnt1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16" + )] + fn _svstnt1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svstnt1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svstnt1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svstnt1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, 
data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svstnt1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svstnt1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svstnt1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, 
data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16" + )] + fn _svstnt1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svstnt1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32" + )] + fn _svstnt1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svstnt1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal 
accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn 
svstnt1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + 
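// The _index forms scale the element index to a byte offset (halfwords: index << 1, words: index << 2) before delegating to the matching _offset form; the constant shift amount keeps unchecked_shl well-defined. +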
svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")] + fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnb_s16(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")] + fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnb_s32(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")] + fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnb_s64(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn 
svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")] + fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnt_s16(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")] + fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnt_s32(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")] + fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnt_s64(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")] + fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_s16(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")] + fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_s32(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")] + fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_s64(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")] + fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")] + fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { 
_svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")] + fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv8i16" + )] + fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublbt_s16(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv4i32" + )] + fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublbt_s32(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> 
svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv2i64" + )] + fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublbt_s64(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")] + fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_s16(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")] + fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_s32(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")] + fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_s64(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn 
svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv8i16")] + fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")] + fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")] + fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv8i16" + )] + fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubltb_s16(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsubltb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv4i32" + )] + fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubltb_s32(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsubltb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv2i64" + )] + fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubltb_s64(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsubltb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")] + fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_s16(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")] + fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_s32(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + 
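+    // "_n_" form: splat the scalar operand with svdup and defer to the vector form.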
svsubwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")] + fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_s64(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")] + fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")] + fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")] + fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")] + fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_s16(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")] + fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_s32(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")] + fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_s64(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")] + fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} 
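+// ---------------------------------------------------------------------------
+// Illustrative sketch (not part of the generated bindings): the "_b"/"_t"
+// suffixes above operate on the even (bottom) and odd (top) lanes of the
+// narrower operands, so a caller that wants every u8 lane widened and
+// subtracted pairs the two forms. `widen_sub_all_lanes` is a hypothetical
+// helper name used only for illustration.
+//
+// #[target_feature(enable = "sve,sve2")]
+// fn widen_sub_all_lanes(a: svuint8_t, b: svuint8_t) -> (svuint16_t, svuint16_t) {
+//     let even = svsublb_u16(a, b); // lanes 0, 2, 4, ... widened to u16
+//     let odd = svsublt_u16(a, b); // lanes 1, 3, 5, ... widened to u16
+//     (even, odd)
+// }
+// ---------------------------------------------------------------------------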
+#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")] + fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")] + fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")] + fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { + _svtbl2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")] + fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { + _svtbl2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = 
"Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")] + fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { + _svtbl2_s8( + svget2_s8::<0>(data), + svget2_s8::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")] + fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { + _svtbl2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")] + fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { + _svtbl2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")] + fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { + _svtbl2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")] + fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbx_f32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")] + fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbx_f64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")] + fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbx_s8(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")] + fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbx_s16(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "C" { + 
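+        // tbx merges: lanes whose index is out of range keep the corresponding
+        // lane of `fallback`.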
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")] + fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbx_s32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")] + fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbx_s64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(punpkhi))] +pub fn svunpkhi_b(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpkhi.nxv16i1" + )] + fn _svunpkhi_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpkhi_b(op).into() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s16(op: svint8_t) -> svint16_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16" + )] + fn _svunpkhi_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_s16(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s32(op: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32" + )] + fn _svunpkhi_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_s32(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s64(op: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64" + )] + fn _svunpkhi_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_s64(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16" + )] + fn _svunpkhi_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32" + )] + fn _svunpkhi_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64" + )] + fn _svunpkhi_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(punpklo))] +pub fn svunpklo_b(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpklo.nxv16i1" + )] + fn _svunpklo_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpklo_b(op).into() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s16(op: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv8i16" + )] + fn _svunpklo_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_s16(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s32(op: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv4i32" + )] + fn _svunpklo_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_s32(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s64(op: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv2i64" + )] + fn _svunpklo_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_s64(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv8i16" + )] + fn _svunpklo_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv4i32" + )] + fn _svunpklo_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv2i64" + )] + fn _svunpklo_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")] + fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { 
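+        // suqadd takes a signed accumulator and an unsigned addend; the LLVM
+        // declaration types both operands as signed, so reinterpret op2 with as_signed().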
_svuqadd_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")] + fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuqadd_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")] + fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuqadd_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")] + fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuqadd_s64_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32" + )] + fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32" + )] + fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32" + )] + fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32" + )] + fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64" + )] + fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64" + )] + fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64" + )] + fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64" + )] + fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32" + )] + fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32" + )] + fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32" + )] + fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32" + )] + fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64" + )] + fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While 
decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64" + )] + fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64" + )] + fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64" + )] + fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32" + )] + fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32" + )] + fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32" + )] + fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s32(op1, op2).into() } +} +#[doc = 
"While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32" + )] + fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64" + )] + fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64" + )] + fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64" + )] + fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64" + )] + fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32" + )] + fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32" + )] + fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32" + )] + fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32" + )] + fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64" + )] + fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64" + )] + fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64" + )] + fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is 
greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64" + )] + fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0" + )] + fn _svwhilerw_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilerw_8ptr(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0" + )] + fn _svwhilerw_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilerw_16ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0" + )] + fn _svwhilerw_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilerw_32ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0" + )] + fn _svwhilerw_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilerw_64ptr(op1, op2).into() +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] 
+pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { + 
svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0" + )] + fn _svwhilewr_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilewr_8ptr(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0" + )] + fn _svwhilewr_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilewr_16ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0" + )] + fn _svwhilewr_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilewr_32ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0" + )] + fn _svwhilewr_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilewr_64ptr(op1, op2).into() +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn 
svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")] + fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svxar_n_s8(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")] + fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svxar_n_s16(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn 
_svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")] + fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svxar_n_s64(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe { svxar_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe { svxar_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe { svxar_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe { svxar_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs new file mode 100644 index 0000000000000..c9260b6c13774 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -0,0 +1,1883 @@ +#![allow(non_camel_case_types)] + +// ============================================================================ +// Imports +// ============================================================================ + +use super::simd_cast; + +// ============================================================================ +// SVE Predicate Types +// ============================================================================ + +/// SVE predicate type (1-bit predicate vector). 
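+// Illustrative sketch only (not part of the bindings): the SVE2 wrappers in
+// sve2.rs above receive element-width predicates from LLVM and widen them to
+// the common `svbool_t` via `.into()`, for example:
+//
+//     let pg4: svbool4_t = _svwhilerw_32ptr(op1, op2); // 32-bit-element predicate
+//     let pg: svbool_t = pg4.into();                   // widened for the public API
+//
+// The scalable predicate and vector types that make this possible are defined
+// below.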
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(1)] +#[repr(C)] +pub struct svbool_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE double-width predicate type (2-bit predicate vector). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(2)] +#[repr(C)] +pub struct svbool2_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE quad-width predicate type (4-bit predicate vector). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svbool4_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE octuple-width predicate type (8-bit predicate vector). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svbool8_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool8_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool8_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// Predicate Generation Functions +// ============================================================================ +// +// These functions generate predicate vectors for loop control and conditional +// operations. They provide convenient wrappers around LLVM SVE intrinsics. + +unsafe extern "C" { + #[link_name = "llvm.aarch64.sve.whilelt"] + fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t; +} + +/// Generate a predicate for while less-than comparison. +/// +/// This function generates a predicate vector where each element is true +/// if the corresponding index (starting from `i`) is less than `n`. +/// +/// This is a convenience wrapper for loop control in SVE code. For more +/// specific variants (e.g., `svwhilelt_b32_s32`), see the functions in +/// the `sve` module. +/// +/// # Safety +/// +/// This function is marked unsafe because it requires the `sve` target feature. +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svwhilelt_b32(i: i32, n: i32) -> svbool_t { + __llvm_sve_whilelt_i32(i, n) +} + +// ============================================================================ +// SVE Vector Types - Signed Integers +// ============================================================================ + +/// SVE 8-bit signed integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svint8_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit signed integer vector. 
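+// Illustrative sketch only (assumes a lane-count helper and a predicate test
+// such as the ACLE's `svptest_any`, neither of which is defined in this file):
+// `svwhilelt_b32` above is intended to drive a strip-mined loop, roughly:
+//
+//     let mut i: i32 = 0;
+//     while i < n {
+//         let pg = unsafe { svwhilelt_b32(i, n) };
+//         // ... predicated loads/stores governed by `pg` ...
+//         i += lanes_per_vector; // hypothetical step: one vector of 32-bit lanes
+//     }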
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svint16_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit signed integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svint32_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit signed integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(2)] +#[repr(C)] +pub struct svint64_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Vector Types - Unsigned Integers +// ============================================================================ + +/// SVE 8-bit unsigned integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svuint8_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit unsigned integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svuint16_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit unsigned integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svuint32_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit unsigned integer vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(2)] +#[repr(C)] +pub struct svuint64_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Vector Types - Floating Point +// ============================================================================ + +/// SVE 32-bit floating-point vector. 
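+// Illustrative note only: the unsigned vectors above pair with the signed
+// vectors of the same element width, and the intrinsic wrappers forward
+// unsigned inputs through the signed LLVM declarations, e.g. (from sve2.rs
+// above):
+//
+//     svxar_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned()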
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svfloat32_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit floating-point vector. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(2)] +#[repr(C)] +pub struct svfloat64_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point vector (uses f32 as underlying type). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svfloat16_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Vector Tuple Types - x2 (Double Vectors) +// ============================================================================ + +/// SVE 8-bit signed integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(32)] +#[repr(C)] +pub struct svint8x2_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 8-bit unsigned integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(32)] +#[repr(C)] +pub struct svuint8x2_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit signed integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svint16x2_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit unsigned integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svuint16x2_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit signed integer double vector (x2). 
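+// Illustrative note only: the `*x2_t` types model the ACLE multi-vector
+// tuples, so their declared element counts are the single-vector counts
+// doubled (e.g. `svint8_t` is declared with 16 lanes, `svint8x2_t` with 32);
+// the `x3` and `x4` families below follow the same pattern.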
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svint32x2_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit unsigned integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svuint32x2_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit signed integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svint64x2_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit unsigned integer double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svuint64x2_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit floating-point double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svfloat32x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit floating-point double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(4)] +#[repr(C)] +pub struct svfloat64x2_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svfloat16x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x2_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Vector Tuple Types - x3 (Triple Vectors) +// ============================================================================ + +/// SVE 8-bit signed integer triple vector (x3). 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(48)] +#[repr(C)] +pub struct svint8x3_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 8-bit unsigned integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(48)] +#[repr(C)] +pub struct svuint8x3_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit signed integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(24)] +#[repr(C)] +pub struct svint16x3_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit unsigned integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(24)] +#[repr(C)] +pub struct svuint16x3_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit signed integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(12)] +#[repr(C)] +pub struct svint32x3_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit unsigned integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(12)] +#[repr(C)] +pub struct svuint32x3_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit signed integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(6)] +#[repr(C)] +pub struct svint64x3_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit unsigned integer triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(6)] +#[repr(C)] +pub struct svuint64x3_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit floating-point triple vector (x3). 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(12)] +#[repr(C)] +pub struct svfloat32x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit floating-point triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(6)] +#[repr(C)] +pub struct svfloat64x3_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(24)] +#[repr(C)] +pub struct svfloat16x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x3_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Vector Tuple Types - x4 (Quadruple Vectors) +// ============================================================================ + +/// SVE 8-bit signed integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(64)] +#[repr(C)] +pub struct svint8x4_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 8-bit unsigned integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(64)] +#[repr(C)] +pub struct svuint8x4_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit signed integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(32)] +#[repr(C)] +pub struct svint16x4_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit unsigned integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(32)] +#[repr(C)] +pub struct svuint16x4_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit signed integer quadruple vector (x4). 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svint32x4_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit unsigned integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svuint32x4_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit signed integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svint64x4_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit unsigned integer quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svuint64x4_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit floating-point quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svfloat32x4_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit floating-point quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svfloat64x4_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x4_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point quadruple vector (x4). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(32)] +#[repr(C)] +pub struct svfloat16x4_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x4_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x4_t { + fn clone(&self) -> Self { + *self + } +} + +// ============================================================================ +// SVE Auxiliary Types +// ============================================================================ + +/// SVE pattern type for vector length specification. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] +pub struct svpattern(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svpattern { + /// Create a pattern value from a raw byte. 
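+    // Illustrative only: `from_raw`/`as_raw` round-trip the encoding used by
+    // the pattern constants defined below, e.g.
+    //
+    //     assert_eq!(svpattern::from_raw(31), svpattern::SV_ALL);
+    //     assert_eq!(svpattern::SV_VL4.as_raw(), 4);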
+ #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn from_raw(value: u8) -> Self { + svpattern(value) + } + + /// Return the pattern value as a raw byte. + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn as_raw(self) -> u8 { + self.0 + } + + // Pattern constants + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_ALL: svpattern = svpattern(31); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL1: svpattern = svpattern(1); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL2: svpattern = svpattern(2); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL3: svpattern = svpattern(3); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL4: svpattern = svpattern(4); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL5: svpattern = svpattern(5); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL6: svpattern = svpattern(6); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL7: svpattern = svpattern(7); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL8: svpattern = svpattern(8); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL16: svpattern = svpattern(9); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL32: svpattern = svpattern(10); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL64: svpattern = svpattern(11); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL128: svpattern = svpattern(12); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL256: svpattern = svpattern(13); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_POW2: svpattern = svpattern(30); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_MUL4: svpattern = svpattern(29); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_MUL3: svpattern = svpattern(28); +} + +/// SVE prefetch operation type. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] +pub struct svprfop(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svprfop { + /// Create a prefetch operation value from a raw byte. + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn from_raw(value: u8) -> Self { + svprfop(value) + } + + /// Return the prefetch operation value as a raw byte. 
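+    // Illustrative only: the raw encoding matches the prefetch-operation
+    // constants defined below, e.g.
+    //
+    //     assert_eq!(svprfop::from_raw(0), svprfop::SV_PLDL1KEEP);
+    //     assert_eq!(svprfop::SV_PSTL1KEEP.as_raw(), 8);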
+ #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn as_raw(self) -> u8 { + self.0 + } + + // Prefetch operation constants + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL1KEEP: svprfop = svprfop(0); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL1STRM: svprfop = svprfop(1); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL2KEEP: svprfop = svprfop(2); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL2STRM: svprfop = svprfop(3); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL3KEEP: svprfop = svprfop(4); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL3STRM: svprfop = svprfop(5); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL1KEEP: svprfop = svprfop(8); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL1STRM: svprfop = svprfop(9); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL2KEEP: svprfop = svprfop(10); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL2STRM: svprfop = svprfop(11); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL3KEEP: svprfop = svprfop(12); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL3STRM: svprfop = svprfop(13); +} + +// ============================================================================ +// Predicate Type Conversion Methods +// ============================================================================ +// +// These methods provide conversion APIs similar to From::from but with +// #[target_feature(enable = "sve")] for cross-compilation support. +// The simd_cast function is defined in the parent module (mod.rs) and uses +// transmute_copy to avoid E0511 errors. +// +// Note: These methods are organized by the source type for clarity. + +/// Conversion methods for svbool_t. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool_t { + /// Convert to svbool2_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// Convert to svbool4_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// Convert to svbool8_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } +} + +/// Conversion methods for svbool2_t. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool2_t { + /// Create from svbool_t (similar to From::from). + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// Convert to svbool_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// Convert to svbool4_t. 
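+    ///
+    /// Hypothetical sketch (compile-only; `pg` is assumed to be an existing
+    /// `svbool2_t` predicate produced by an SVE intrinsic):
+    ///
+    /// ```ignore
+    /// let pg4: svbool4_t = unsafe { pg.into_svbool4() };
+    /// ```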
+ #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// Convert to svbool8_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } + + /// Create from svbool4_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool4(x: svbool4_t) -> Self { + simd_cast(x) + } + + /// Create from svbool8_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool8(x: svbool8_t) -> Self { + simd_cast(x) + } +} + +/// Conversion methods for svbool4_t. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool4_t { + /// Create from svbool_t (similar to From::from). + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// Convert to svbool_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// Convert to svbool2_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// Convert to svbool8_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } + + /// Create from svbool2_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool2(x: svbool2_t) -> Self { + simd_cast(x) + } + + /// Create from svbool8_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool8(x: svbool8_t) -> Self { + simd_cast(x) + } +} + +/// Conversion methods for svbool8_t. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool8_t { + /// Create from svbool_t (similar to From::from). + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// Convert to svbool_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// Convert to svbool2_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// Convert to svbool4_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// Create from svbool2_t. + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool2(x: svbool2_t) -> Self { + simd_cast(x) + } + + /// Create from svbool4_t. 
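+    ///
+    /// Hypothetical sketch (compile-only; `pg4` is assumed to be an existing
+    /// `svbool4_t` predicate produced by an SVE intrinsic):
+    ///
+    /// ```ignore
+    /// let pg8 = unsafe { svbool8_t::from_svbool4(pg4) };
+    /// ```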
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn from_svbool4(x: svbool4_t) -> Self {
+        simd_cast(x)
+    }
+}
+
+// ============================================================================
+// From Trait Implementations for Predicate Types
+// ============================================================================
+//
+// These implementations are used for .into() calls in generated code.
+// Note: They do not use target_feature because trait methods such as
+// From::from cannot carry that attribute. The conversion itself is a pure
+// bit-level reinterpretation and does not emit SVE instructions.
+
+// Conversions from svbool_t to wider predicate types
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool2_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool4_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool8_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+// Conversions from wider predicate types to svbool_t
+// These implementations use transmute_copy for bit-level conversion.
+// No target feature is required since transmute_copy is a pure bit-level
+// operation that doesn't involve SVE instructions.
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool2_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool2_t) -> Self {
+        unsafe { core::mem::transmute_copy(&x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool4_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool4_t) -> Self {
+        unsafe { core::mem::transmute_copy(&x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool8_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool8_t) -> Self {
+        unsafe { core::mem::transmute_copy(&x) }
+    }
+}
+
+// ============================================================================
+// Vector Type Conversion Traits
+// ============================================================================
+//
+// These traits are used in generated code for converting between signed and
+// unsigned vector types. They provide a consistent API for type conversions
+// across all SVE vector types (single vectors and tuple vectors).
+
+/// Trait for converting to unsigned vector types.
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub trait AsUnsigned {
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    type Unsigned;
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    fn as_unsigned(self) -> Self::Unsigned;
+}
+
+/// Trait for converting to signed vector types.
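+///
+/// Hypothetical usage sketch (compile-only; `v` is assumed to be an existing
+/// `svuint32_t` produced by an SVE intrinsic), relying on the implementations
+/// below:
+///
+/// ```ignore
+/// let s: svint32_t = v.as_signed();
+/// let u: svuint32_t = s.as_unsigned();
+/// ```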
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub trait AsSigned { + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + type Signed; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + fn as_signed(self) -> Self::Signed; +} + +// ============================================================================ +// AsUnsigned and AsSigned Implementations - Single Vectors +// ============================================================================ + +// 8-bit types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8_t { + type Unsigned = svuint8_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8_t { + type Signed = svint8_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8_t { + type Unsigned = svuint8_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8_t { + type Signed = svint8_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 16-bit types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16_t { + type Unsigned = svuint16_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16_t { + type Signed = svint16_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint16_t { + type Unsigned = svuint16_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint16_t { + type Signed = svint16_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 32-bit types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint32_t { + type Unsigned = svuint32_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint32_t { + type Signed = svint32_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint32_t { + type Unsigned = svuint32_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint32_t { + type Signed = svint32_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 64-bit types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint64_t { + type Unsigned = svuint64_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint64_t { + type Signed = svint64_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] +impl AsUnsigned for svint64_t { + type Unsigned = svuint64_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64_t { + type Signed = svint64_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// ============================================================================ +// AsUnsigned and AsSigned Implementations - x2 Tuple Vectors +// ============================================================================ + +// 8-bit x2 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8x2_t { + type Unsigned = svuint8x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8x2_t { + type Signed = svint8x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8x2_t { + type Unsigned = svuint8x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8x2_t { + type Signed = svint8x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 16-bit x2 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16x2_t { + type Unsigned = svuint16x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16x2_t { + type Signed = svint16x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint16x2_t { + type Unsigned = svuint16x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint16x2_t { + type Signed = svint16x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 32-bit x2 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint32x2_t { + type Unsigned = svuint32x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint32x2_t { + type Signed = svint32x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint32x2_t { + type Unsigned = svuint32x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint32x2_t { + type Signed = svint32x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 64-bit x2 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint64x2_t { + type Unsigned = svuint64x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for 
svuint64x2_t { + type Signed = svint64x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint64x2_t { + type Unsigned = svuint64x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64x2_t { + type Signed = svint64x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// ============================================================================ +// AsUnsigned and AsSigned Implementations - x3 Tuple Vectors +// ============================================================================ + +// 8-bit x3 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8x3_t { + type Unsigned = svuint8x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8x3_t { + type Signed = svint8x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8x3_t { + type Unsigned = svuint8x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8x3_t { + type Signed = svint8x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 16-bit x3 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16x3_t { + type Unsigned = svuint16x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16x3_t { + type Signed = svint16x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint16x3_t { + type Unsigned = svuint16x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint16x3_t { + type Signed = svint16x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 32-bit x3 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint32x3_t { + type Unsigned = svuint32x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint32x3_t { + type Signed = svint32x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint32x3_t { + type Unsigned = svuint32x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint32x3_t { + type Signed = svint32x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 64-bit x3 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint64x3_t { + type 
Unsigned = svuint64x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint64x3_t { + type Signed = svint64x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint64x3_t { + type Unsigned = svuint64x3_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64x3_t { + type Signed = svint64x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// ============================================================================ +// AsUnsigned and AsSigned Implementations - x4 Tuple Vectors +// ============================================================================ + +// 8-bit x4 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8x4_t { + type Unsigned = svuint8x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8x4_t { + type Signed = svint8x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8x4_t { + type Unsigned = svuint8x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8x4_t { + type Signed = svint8x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 16-bit x4 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16x4_t { + type Unsigned = svuint16x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16x4_t { + type Signed = svint16x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint16x4_t { + type Unsigned = svuint16x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint16x4_t { + type Signed = svint16x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 32-bit x4 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint32x4_t { + type Unsigned = svuint32x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint32x4_t { + type Signed = svint32x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint32x4_t { + type Unsigned = svuint32x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint32x4_t { + type Signed = svint32x4_t; + 
#[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// 64-bit x4 types +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint64x4_t { + type Unsigned = svuint64x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + self + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint64x4_t { + type Signed = svint64x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint64x4_t { + type Unsigned = svuint64x4_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64x4_t { + type Signed = svint64x4_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + self + } +} + +// ============================================================================ +// LLVM Type Aliases +// ============================================================================ +// +// These type aliases map LLVM machine representations (nxv* types) to Rust +// SVE types. They are used by the code generator to match LLVM intrinsic +// signatures with Rust type definitions. + +// Signed integer type aliases +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv8i8 = svint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv4i8 = svint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv4i16 = svint16_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2i8 = svint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2i16 = svint16_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2i32 = svint32_t; + +// Unsigned integer type aliases +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv8u8 = svuint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv4u8 = svuint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv4u16 = svuint16_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2u8 = svuint8_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2u16 = svuint16_t; + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub type nxv2u32 = svuint32_t; diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index c58580f641780..db0c685b03ce5 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -35,7 +35,8 @@ x86_amx_intrinsics, f16, aarch64_unstable_target_feature, - bigint_helper_methods + bigint_helper_methods, + portable_simd )] #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))] #![deny(clippy::missing_inline_in_public_items)] diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index e00b43353679e..511a237b963d9 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -48,6 +48,23 @@ macro_rules! static_assert_simm_bits { }; } +#[allow(unused_macros)] +macro_rules! 
static_assert_range {
+    ($imm:ident, $min:expr, $max:expr) => {
+        static_assert!(
+            $min <= $imm && $imm <= $max,
+            concat!(
+                stringify!($imm),
+                " is not in the range [",
+                stringify!($min),
+                ", ",
+                stringify!($max),
+                "]",
+            )
+        )
+    };
+}
+
 #[allow(unused)]
 macro_rules! types {
 (
diff --git a/tests/codegen-llvm/scalable-vectors/simple.rs b/tests/codegen-llvm/scalable-vectors/simple.rs
deleted file mode 100644
index 9706b4acab340..0000000000000
--- a/tests/codegen-llvm/scalable-vectors/simple.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-//@ edition: 2021
-//@ only-aarch64
-#![crate_type = "lib"]
-#![allow(incomplete_features, internal_features)]
-#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)]
-
-#[derive(Copy, Clone)]
-#[rustc_scalable_vector(4)]
-#[allow(non_camel_case_types)]
-pub struct svint32_t(i32);
-
-#[inline(never)]
-#[target_feature(enable = "sve")]
-pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
-    extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
-        fn _svdup_n_s32(op: i32) -> svint32_t;
-    }
-    unsafe { _svdup_n_s32(op) }
-}
-
-#[inline]
-#[target_feature(enable = "sve,sve2")]
-pub unsafe fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
-    extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")]
-        fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
-    }
-    unsafe { _svxar_n_s32(op1, op2, IMM3) }
-}
-
-#[inline(never)]
-#[no_mangle]
-#[target_feature(enable = "sve,sve2")]
-// CHECK: define <vscale x 4 x i32> @pass_as_ref(ptr noalias noundef readonly align 16 captures(none) dereferenceable(16) %a, <vscale x 4 x i32> %b)
-pub unsafe fn pass_as_ref(a: &svint32_t, b: svint32_t) -> svint32_t {
-    // CHECK: load <vscale x 4 x i32>, ptr %a, align 16
-    svxar_n_s32::<1>(*a, b)
-}
-
-#[no_mangle]
-#[target_feature(enable = "sve,sve2")]
-// CHECK: define <vscale x 4 x i32> @test()
-pub unsafe fn test() -> svint32_t {
-    let a = svdup_n_s32(1);
-    let b = svdup_n_s32(2);
-    // CHECK: %_0 = call <vscale x 4 x i32> @pass_as_ref(ptr noalias noundef nonnull readonly align 16 dereferenceable(16) %a, <vscale x 4 x i32> %b)
-    pass_as_ref(&a, b)
-}